diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1b5abab6b5..a1c670b1ac 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -11,7 +11,7 @@
This [github document](https://help.github.com/articles/creating-a-pull-request/) provides some guidance on how to create a pull request in github.
## PR requirement
-To pursue engineering excellence, we have insisted on the highest stardard on the quality of each PR.
+To pursue engineering excellence, we have insisted on the highest standard for the quality of each PR.
* For each PR, please run [golint](https://github.com/golang/lint), [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports), to fix the basic issues/warnings.
* Make sure you understand [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/).
@@ -21,7 +21,7 @@ To pursue engineering excellence, we have insisted on the highest stardard on th
The best practice is to reorder and squash your local commits before the PR submission to create an atomic and self-contained PR.
This [book chapter](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History) provides detailed explanation and guidance on how to rewrite the local git history.
-For exampple, a typical workflow is like the following.
+For example, a typical workflow is like the following.
```bash
# assuming you are working on a fix of bug1, and use a local branch called "fixes_of_bug1".
diff --git a/Makefile b/Makefile
index deb990bf50..906e8c06a0 100644
--- a/Makefile
+++ b/Makefile
@@ -179,4 +179,4 @@ debug_external: clean
bash test/debug-external.sh
build_localnet_validator:
- bash test/build-localnet-validator.sh
\ No newline at end of file
+ bash test/build-localnet-validator.sh
diff --git a/README.md b/README.md
index 0f2aa11f96..5d2b1bd4cf 100644
--- a/README.md
+++ b/README.md
@@ -150,7 +150,7 @@ make debug-kill
To keep things consistent, we have a docker image to run all tests. **These are the same tests ran on the pull request checks**.
-Note that all testing docker container binds a couple of ports to the host machine for your convince. The ports are:
+Note that all test Docker containers bind several ports to the host machine for your convenience. The ports are:
* `9500` - Shard 0 RPC for a validator
* `9501` - Shard 1 RPC for a validator
* `9599` - Shard 0 RPC for an explorer
@@ -174,7 +174,7 @@ To run this test, do:
make test-rpc
```
This test starts a localnet (within the Docker container), **ensures it reaches a consensus**, and runs a series of tests to ensure correct RPC behavior.
-This test also acts as a preliminary integration test (more through tests are done on the testnets).
+This test also acts as a preliminary integration test (more thorough tests are done on the testnets).
> The tests ran by this command can be found [here](https://github.com/harmony-one/harmony-test/tree/master/localnet).
If you wish to debug further with the localnet after the tests are done, open a new shell and run:
@@ -194,7 +194,7 @@ To run this test, do:
make test-rosetta
```
This test starts a localnet (within the Docker container), **ensures it reaches a consensus**, and runs the Construction & Data API checks using the [rosetta-cli](https://github.com/coinbase/rosetta-cli).
-This test also acts as a preliminary integration test (more through tests are done on the testnets).
+This test also acts as a preliminary integration test (more thorough tests are done on the testnets).
> The config for this test can be found [here](https://github.com/harmony-one/harmony-test/blob/master/localnet/configs/localnet_rosetta_test_s0.json) & [here](https://github.com/harmony-one/harmony-test/blob/master/localnet/configs/localnet_rosetta_test_s1.json)
Similar to the RPC tests, if you wish to debug further with the localnet after the tests are done, open a new shell and run:
diff --git a/api/service/legacysync/epoch_syncing.go b/api/service/legacysync/epoch_syncing.go
index e4453cb69c..7719c8a819 100644
--- a/api/service/legacysync/epoch_syncing.go
+++ b/api/service/legacysync/epoch_syncing.go
@@ -138,6 +138,9 @@ func syncLoop(bc core.BlockChain, syncConfig *SyncConfig) (timeout int) {
err := ProcessStateSync(syncConfig, heights, bc)
if err != nil {
+ if errors.Is(err, core.ErrKnownBlock) {
+ return 10
+ }
utils.Logger().Error().Err(err).
Msgf("[EPOCHSYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherEpoch: %d, currentEpoch: %d)",
isBeacon, bc.ShardID(), otherEpoch, curEpoch)
@@ -199,8 +202,18 @@ func processWithPayload(payload [][]byte, bc core.BlockChain) error {
decoded = append(decoded, block)
}
- _, err := bc.InsertChain(decoded, true)
- return err
+ for _, block := range decoded {
+ _, err := bc.InsertChain([]*types.Block{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ continue
+ case err != nil:
+ return err
+ default:
+ }
+ }
+
+ return nil
}
// CreateSyncConfig creates SyncConfig for StateSync object.
diff --git a/api/service/legacysync/syncing.go b/api/service/legacysync/syncing.go
index 44c5de64a2..a85a5e9d51 100644
--- a/api/service/legacysync/syncing.go
+++ b/api/service/legacysync/syncing.go
@@ -860,11 +860,12 @@ func (ss *StateSync) getBlockFromLastMileBlocksByParentHash(parentHash common.Ha
}
// UpdateBlockAndStatus ...
-func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error {
+func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error {
if block.NumberU64() != bc.CurrentBlock().NumberU64()+1 {
utils.Logger().Debug().Uint64("curBlockNum", bc.CurrentBlock().NumberU64()).Uint64("receivedBlockNum", block.NumberU64()).Msg("[SYNC] Inappropriate block number, ignore!")
return nil
}
+ verifyAllSig := true
haveCurrentSig := len(block.GetCurrentCommitSig()) != 0
// Verify block signatures
@@ -904,7 +905,17 @@ func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain
}
_, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
- if err != nil {
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ utils.Logger().Info().
+ Uint64("blockHeight", block.NumberU64()).
+ Uint64("blockEpoch", block.Epoch().Uint64()).
+ Str("blockHex", block.Hash().Hex()).
+ Uint32("ShardID", block.ShardID()).
+ Err(err).
+ Msg("[SYNC] UpdateBlockAndStatus: Block exists")
+ return nil
+ case err != nil:
utils.Logger().Error().
Err(err).
Msgf(
@@ -913,6 +924,7 @@ func (ss *StateSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain
block.ShardID(),
)
return err
+ default:
}
utils.Logger().Info().
Uint64("blockHeight", block.NumberU64()).
@@ -944,8 +956,8 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
break
}
// Enforce sig check for the last block in a batch
- enforceSigCheck := !commonIter.HasNext()
- err = ss.UpdateBlockAndStatus(block, bc, enforceSigCheck)
+ _ = !commonIter.HasNext()
+ err = ss.UpdateBlockAndStatus(block, bc)
if err != nil {
break
}
@@ -962,7 +974,7 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
if block == nil {
break
}
- err = ss.UpdateBlockAndStatus(block, bc, true)
+ err = ss.UpdateBlockAndStatus(block, bc)
if err != nil {
break
}
@@ -983,7 +995,7 @@ func (ss *StateSync) generateNewState(bc core.BlockChain) error {
if block == nil {
break
}
- err = ss.UpdateBlockAndStatus(block, bc, false)
+ err = ss.UpdateBlockAndStatus(block, bc)
if err != nil {
break
}
@@ -1111,6 +1123,9 @@ func (ss *StateSync) SyncLoop(bc core.BlockChain, isBeacon bool, consensus *cons
}
err := ss.ProcessStateSync(startHash[:], size, bc)
if err != nil {
+ if errors.Is(err, core.ErrKnownBlock) {
+ continue
+ }
utils.Logger().Error().Err(err).
Msgf("[SYNC] ProcessStateSync failed (isBeacon: %t, ShardID: %d, otherHeight: %d, currentHeight: %d)",
isBeacon, bc.ShardID(), otherHeight, currentHeight)
@@ -1148,7 +1163,11 @@ func (ss *StateSync) addConsensusLastMile(bc core.BlockChain, consensus *consens
if block == nil {
break
}
- if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil {
+ _, err := bc.InsertChain(types.Blocks{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case errors.Is(err, core.ErrNotLastBlockInEpoch):
+ case err != nil:
return errors.Wrap(err, "failed to InsertChain")
}
}
diff --git a/api/service/stagedstreamsync/adapter.go b/api/service/stagedstreamsync/adapter.go
index ca9c6a6787..56c42b661c 100644
--- a/api/service/stagedstreamsync/adapter.go
+++ b/api/service/stagedstreamsync/adapter.go
@@ -9,6 +9,7 @@ import (
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/p2p/stream/common/streammanager"
syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
)
@@ -20,6 +21,10 @@ type syncProtocol interface {
GetBlocksByHashes(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) ([]*types.Block, sttypes.StreamID, error)
GetReceipts(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) (receipts []types.Receipts, stid sttypes.StreamID, err error)
GetNodeData(ctx context.Context, hs []common.Hash, opts ...syncproto.Option) (data [][]byte, stid sttypes.StreamID, err error)
+ GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...syncproto.Option) (accounts []*message.AccountData, proof [][]byte, stid sttypes.StreamID, err error)
+ GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...syncproto.Option) (slots [][]*message.StorageData, proof [][]byte, stid sttypes.StreamID, err error)
+ GetByteCodes(ctx context.Context, hs []common.Hash, bytes uint64, opts ...syncproto.Option) (codes [][]byte, stid sttypes.StreamID, err error)
+ GetTrieNodes(ctx context.Context, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, opts ...syncproto.Option) (nodes [][]byte, stid sttypes.StreamID, err error)
RemoveStream(stID sttypes.StreamID) // If a stream delivers invalid data, remove the stream
StreamFailed(stID sttypes.StreamID, reason string)
diff --git a/api/service/stagedstreamsync/beacon_helper.go b/api/service/stagedstreamsync/beacon_helper.go
index a996f368bf..4fe70dc23d 100644
--- a/api/service/stagedstreamsync/beacon_helper.go
+++ b/api/service/stagedstreamsync/beacon_helper.go
@@ -1,8 +1,10 @@
package stagedstreamsync
import (
+ "errors"
"time"
+ "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
"github.com/rs/zerolog"
@@ -126,7 +128,8 @@ func (bh *beaconHelper) insertLastMileBlocks() (inserted int, bn uint64, err err
}
// TODO: Instruct the beacon helper to verify signatures. This may require some forks
// in pub-sub message (add commit sigs in node.block.sync messages)
- if _, err = bh.bc.InsertChain(types.Blocks{b}, true); err != nil {
+ _, err = bh.bc.InsertChain(types.Blocks{b}, true)
+ if err != nil && !errors.Is(err, core.ErrKnownBlock) {
bn--
return
}
diff --git a/api/service/stagedstreamsync/block_manager.go b/api/service/stagedstreamsync/block_manager.go
index 28c966b4d6..d614d24205 100644
--- a/api/service/stagedstreamsync/block_manager.go
+++ b/api/service/stagedstreamsync/block_manager.go
@@ -1,8 +1,10 @@
package stagedstreamsync
import (
+ "fmt"
"sync"
+ "github.com/ethereum/go-ethereum/common"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/rs/zerolog"
@@ -11,6 +13,7 @@ import (
type BlockDownloadDetails struct {
loopID int
streamID sttypes.StreamID
+ rootHash common.Hash
}
// blockDownloadManager is the helper structure for get blocks request management
@@ -19,11 +22,11 @@ type blockDownloadManager struct {
tx kv.RwTx
targetBN uint64
- requesting map[uint64]struct{} // block numbers that have been assigned to workers but not received
- processing map[uint64]struct{} // block numbers received requests but not inserted
- retries *prioritizedNumbers // requests where error happens
- rq *resultQueue // result queue wait to be inserted into blockchain
- bdd map[uint64]BlockDownloadDetails // details about how this block was downloaded
+ requesting map[uint64]struct{} // block numbers that have been assigned to workers but not received
+ processing map[uint64]struct{} // block numbers received but not yet inserted
+ retries *prioritizedNumbers // block numbers whose requests failed and will be retried
+ rq *resultQueue // queue of results waiting to be inserted into the blockchain
+ bdd map[uint64]*BlockDownloadDetails // details about how this block was downloaded
logger zerolog.Logger
lock sync.Mutex
@@ -38,26 +41,26 @@ func newBlockDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, logg
processing: make(map[uint64]struct{}),
retries: newPrioritizedNumbers(),
rq: newResultQueue(),
- bdd: make(map[uint64]BlockDownloadDetails),
+ bdd: make(map[uint64]*BlockDownloadDetails),
logger: logger,
}
}
// GetNextBatch get the next block numbers batch
-func (gbm *blockDownloadManager) GetNextBatch() []uint64 {
+func (gbm *blockDownloadManager) GetNextBatch(curHeight uint64) []uint64 {
gbm.lock.Lock()
defer gbm.lock.Unlock()
cap := BlocksPerRequest
- bns := gbm.getBatchFromRetries(cap)
+ bns := gbm.getBatchFromRetries(cap, curHeight)
if len(bns) > 0 {
cap -= len(bns)
gbm.addBatchToRequesting(bns)
}
if gbm.availableForMoreTasks() {
- addBNs := gbm.getBatchFromUnprocessed(cap)
+ addBNs := gbm.getBatchFromUnprocessed(cap, curHeight)
gbm.addBatchToRequesting(addBNs)
bns = append(bns, addBNs...)
}
@@ -88,7 +91,7 @@ func (gbm *blockDownloadManager) HandleRequestResult(bns []uint64, blockBytes []
gbm.retries.push(bn)
} else {
gbm.processing[bn] = struct{}{}
- gbm.bdd[bn] = BlockDownloadDetails{
+ gbm.bdd[bn] = &BlockDownloadDetails{
loopID: loopID,
streamID: streamID,
}
@@ -107,7 +110,7 @@ func (gbm *blockDownloadManager) SetDownloadDetails(bns []uint64, loopID int, st
defer gbm.lock.Unlock()
for _, bn := range bns {
- gbm.bdd[bn] = BlockDownloadDetails{
+ gbm.bdd[bn] = &BlockDownloadDetails{
loopID: loopID,
streamID: streamID,
}
@@ -116,25 +119,43 @@ func (gbm *blockDownloadManager) SetDownloadDetails(bns []uint64, loopID int, st
}
// GetDownloadDetails returns the download details for a block
-func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID) {
+func (gbm *blockDownloadManager) GetDownloadDetails(blockNumber uint64) (loopID int, streamID sttypes.StreamID, err error) {
gbm.lock.Lock()
defer gbm.lock.Unlock()
- return gbm.bdd[blockNumber].loopID, gbm.bdd[blockNumber].streamID
+ if dm, exist := gbm.bdd[blockNumber]; exist {
+ return dm.loopID, dm.streamID, nil
+ }
+ return 0, sttypes.StreamID(fmt.Sprint(0)), fmt.Errorf("no download details found for block number %d", blockNumber)
+}
+
+// SetRootHash sets the root hash for a specific block
+func (gbm *blockDownloadManager) SetRootHash(blockNumber uint64, root common.Hash) {
+ gbm.lock.Lock()
+ defer gbm.lock.Unlock()
+
+ gbm.bdd[blockNumber].rootHash = root
+}
+
+// GetRootHash returns the root hash for a specific block
+func (gbm *blockDownloadManager) GetRootHash(blockNumber uint64) common.Hash {
+ gbm.lock.Lock()
+ defer gbm.lock.Unlock()
+
+ return gbm.bdd[blockNumber].rootHash
}
// getBatchFromRetries get the block number batch to be requested from retries.
-func (gbm *blockDownloadManager) getBatchFromRetries(cap int) []uint64 {
+func (gbm *blockDownloadManager) getBatchFromRetries(cap int, fromBlockNumber uint64) []uint64 {
var (
requestBNs []uint64
- curHeight = gbm.chain.CurrentBlock().NumberU64()
)
for cnt := 0; cnt < cap; cnt++ {
bn := gbm.retries.pop()
if bn == 0 {
break // no more retries
}
- if bn <= curHeight {
+ if bn <= fromBlockNumber {
continue
}
requestBNs = append(requestBNs, bn)
@@ -143,10 +164,9 @@ func (gbm *blockDownloadManager) getBatchFromRetries(cap int) []uint64 {
}
// getBatchFromUnprocessed returns a batch of block numbers to be requested from unprocessed.
-func (gbm *blockDownloadManager) getBatchFromUnprocessed(cap int) []uint64 {
+func (gbm *blockDownloadManager) getBatchFromUnprocessed(cap int, curHeight uint64) []uint64 {
var (
requestBNs []uint64
- curHeight = gbm.chain.CurrentBlock().NumberU64()
)
bn := curHeight + 1
// TODO: this algorithm can be potentially optimized.
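Note: GetDownloadDetails now returns an error when no details were recorded for a block number, so callers look entries up defensively instead of reading a zero value. A minimal sketch of that call pattern, assuming it lives inside the stagedstreamsync package where marshalData, BlocksBucket, and the erigon-lib kv types are already available:

```go
// Illustrative sketch only: fetch a downloaded block's bytes from the per-loop
// cache DB, using the error-returning GetDownloadDetails to find its location.
func lookupDownloadedBlock(gbm *blockDownloadManager, txs []kv.RwTx, bn uint64) ([]byte, error) {
	loopID, streamID, err := gbm.GetDownloadDetails(bn)
	if err != nil {
		return nil, err // no record for this block number; caller decides how to retry
	}
	_ = streamID // stream ID is kept so a bad peer can be penalized later
	// loopID selects the worker-local cache DB that stored the block bytes
	return txs[loopID].GetOne(BlocksBucket, marshalData(bn))
}
```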
diff --git a/api/service/stagedstreamsync/const.go b/api/service/stagedstreamsync/const.go
index 048b5d812d..2789bfb1e3 100644
--- a/api/service/stagedstreamsync/const.go
+++ b/api/service/stagedstreamsync/const.go
@@ -23,9 +23,35 @@ const (
// no more request will be assigned to workers to wait for InsertChain to finish.
SoftQueueCap int = 100
+ // number of node hashes per get-nodes-by-hashes request
+ StatesPerRequest int = 100
+
+ // maximum number of blocks for get receipts request
+ ReceiptsPerRequest int = 10
+
+ // DefaultConcurrency is the default setting for concurrency
+ DefaultConcurrency int = 4
+
+ // MaxTriesToFetchNodeData is the maximum number of tries to fetch node data
+ MaxTriesToFetchNodeData int = 5
+
// ShortRangeTimeout is the timeout for each short range sync, which allows short range sync
// to restart automatically when stuck in `getBlockHashes`
ShortRangeTimeout time.Duration = 1 * time.Minute
+
+ // pivot block distance ranges
+ MinPivotDistanceToHead uint64 = 1024
+ MaxPivotDistanceToHead uint64 = 2048
+)
+
+// SyncMode represents the synchronization mode of the downloader.
+// It is a uint32 as it is used with atomic operations.
+type SyncMode uint32
+
+const (
+ FullSync SyncMode = iota // Synchronize the entire blockchain history from full blocks
+ FastSync // Download all blocks and states
+ SnapSync // Download the chain and the state via compact snapshots
)
type (
@@ -35,6 +61,9 @@ type (
// TODO: remove this when stream sync is fully up.
ServerOnly bool
+ // Synchronization mode of the downloader
+ SyncMode SyncMode
+
// parameters
Network nodeconfig.NetworkType
Concurrency int // Number of concurrent sync requests
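Note: the new SyncMode field makes the sync strategy a per-Config choice. A hedged sketch of a fast-sync configuration (field values are illustrative; only the fields visible in this hunk are set):

```go
// Illustrative only: a downloader Config opting into fast sync.
cfg := Config{
	ServerOnly:  false,
	SyncMode:    FastSync,            // FullSync, FastSync, or SnapSync
	Network:     nodeconfig.Localnet, // assumed network type for a local test setup
	Concurrency: DefaultConcurrency,  // 4 concurrent sync requests
}
_ = cfg
```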
diff --git a/api/service/stagedstreamsync/default_stages.go b/api/service/stagedstreamsync/default_stages.go
index 55986ff6e8..fe64e26d4c 100644
--- a/api/service/stagedstreamsync/default_stages.go
+++ b/api/service/stagedstreamsync/default_stages.go
@@ -8,35 +8,91 @@ type ForwardOrder []SyncStageID
type RevertOrder []SyncStageID
type CleanUpOrder []SyncStageID
-var DefaultForwardOrder = ForwardOrder{
- Heads,
- SyncEpoch,
- ShortRange,
- BlockBodies,
- // Stages below don't use Internet
- States,
- LastMile,
- Finish,
+var (
+ StagesForwardOrder ForwardOrder
+ StagesRevertOrder RevertOrder
+ StagesCleanUpOrder CleanUpOrder
+)
+
+func initStagesOrder(syncMode SyncMode) {
+ switch syncMode {
+ case FullSync:
+ initFullSyncStagesOrder()
+ case FastSync:
+ initFastSyncStagesOrder()
+ default:
+ panic("not supported sync mode")
+ }
}
-var DefaultRevertOrder = RevertOrder{
- Finish,
- LastMile,
- States,
- BlockBodies,
- ShortRange,
- SyncEpoch,
- Heads,
+func initFullSyncStagesOrder() {
+ StagesForwardOrder = ForwardOrder{
+ Heads,
+ SyncEpoch,
+ ShortRange,
+ BlockBodies,
+ States,
+ LastMile,
+ Finish,
+ }
+
+ StagesRevertOrder = RevertOrder{
+ Finish,
+ LastMile,
+ States,
+ BlockBodies,
+ ShortRange,
+ SyncEpoch,
+ Heads,
+ }
+
+ StagesCleanUpOrder = CleanUpOrder{
+ Finish,
+ LastMile,
+ States,
+ BlockBodies,
+ ShortRange,
+ SyncEpoch,
+ Heads,
+ }
}
-var DefaultCleanUpOrder = CleanUpOrder{
- Finish,
- LastMile,
- States,
- BlockBodies,
- ShortRange,
- SyncEpoch,
- Heads,
+func initFastSyncStagesOrder() {
+ StagesForwardOrder = ForwardOrder{
+ Heads,
+ SyncEpoch,
+ ShortRange,
+ BlockBodies,
+ Receipts,
+ FullStateSync,
+ States,
+ LastMile,
+ Finish,
+ }
+
+ StagesRevertOrder = RevertOrder{
+ Finish,
+ LastMile,
+ States,
+ FullStateSync,
+ Receipts,
+ BlockBodies,
+ ShortRange,
+ SyncEpoch,
+ Heads,
+ }
+
+ StagesCleanUpOrder = CleanUpOrder{
+ Finish,
+ LastMile,
+ States,
+ FullStateSync,
+ Receipts,
+ BlockBodies,
+ ShortRange,
+ SyncEpoch,
+ Heads,
+ }
}
func DefaultStages(ctx context.Context,
@@ -44,7 +100,10 @@ func DefaultStages(ctx context.Context,
seCfg StageEpochCfg,
srCfg StageShortRangeCfg,
bodiesCfg StageBodiesCfg,
+ stateSyncCfg StageStateSyncCfg,
+ fullStateSyncCfg StageFullStateSyncCfg,
statesCfg StageStatesCfg,
+ receiptsCfg StageReceiptsCfg,
lastMileCfg StageLastMileCfg,
finishCfg StageFinishCfg,
) []*Stage {
@@ -54,44 +113,82 @@ func DefaultStages(ctx context.Context,
handlerStageEpochSync := NewStageEpoch(seCfg)
handlerStageBodies := NewStageBodies(bodiesCfg)
handlerStageStates := NewStageStates(statesCfg)
+ handlerStageStateSync := NewStageStateSync(stateSyncCfg)
+ handlerStageFullStateSync := NewStageFullStateSync(fullStateSyncCfg)
+ handlerStageReceipts := NewStageReceipts(receiptsCfg)
handlerStageLastMile := NewStageLastMile(lastMileCfg)
handlerStageFinish := NewStageFinish(finishCfg)
return []*Stage{
{
- ID: Heads,
- Description: "Retrieve Chain Heads",
- Handler: handlerStageHeads,
+ ID: Heads,
+ Description: "Retrieve Chain Heads",
+ Handler: handlerStageHeads,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChains,
+ },
+ {
+ ID: SyncEpoch,
+ Description: "Sync only Last Block of Epoch",
+ Handler: handlerStageEpochSync,
+ RangeMode: OnlyShortRange,
+ ChainExecutionMode: OnlyEpochChain,
+ },
+ {
+ ID: ShortRange,
+ Description: "Short Range Sync",
+ Handler: handlerStageShortRange,
+ RangeMode: OnlyShortRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
+ },
+ {
+ ID: BlockBodies,
+ Description: "Retrieve Block Bodies",
+ Handler: handlerStageBodies,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: SyncEpoch,
- Description: "Sync only Last Block of Epoch",
- Handler: handlerStageEpochSync,
+ ID: States,
+ Description: "Update Blockchain State",
+ Handler: handlerStageStates,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: ShortRange,
- Description: "Short Range Sync",
- Handler: handlerStageShortRange,
+ ID: StateSync,
+ Description: "Retrieve States",
+ Handler: handlerStageStateSync,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: BlockBodies,
- Description: "Retrieve Block Bodies",
- Handler: handlerStageBodies,
+ ID: FullStateSync,
+ Description: "Retrieve Full States",
+ Handler: handlerStageFullStateSync,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: States,
- Description: "Update Blockchain State",
- Handler: handlerStageStates,
+ ID: Receipts,
+ Description: "Retrieve Receipts",
+ Handler: handlerStageReceipts,
+ RangeMode: OnlyLongRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: LastMile,
- Description: "update status for blocks after sync and update last mile blocks as well",
- Handler: handlerStageLastMile,
+ ID: LastMile,
+ Description: "update status for blocks after sync and update last mile blocks as well",
+ Handler: handlerStageLastMile,
+ RangeMode: LongRangeAndShortRange,
+ ChainExecutionMode: AllChainsExceptEpochChain,
},
{
- ID: Finish,
- Description: "Finalize Changes",
- Handler: handlerStageFinish,
+ ID: Finish,
+ Description: "Finalize Changes",
+ Handler: handlerStageFinish,
+ RangeMode: LongRangeAndShortRange,
+ ChainExecutionMode: AllChains,
},
}
}
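Note: because the stage order is now selected at runtime, initStagesOrder must run before the stages are scheduled; fast sync adds the Receipts and FullStateSync stages between BlockBodies and States. A short sketch of the selection step, assuming it is called from within the package during syncer setup:

```go
// Illustrative only: choose the stage order for the configured sync mode.
// Only FullSync and FastSync are handled; other modes panic in initStagesOrder.
initStagesOrder(FastSync)
for _, stageID := range StagesForwardOrder {
	fmt.Println("forward stage:", stageID)
}
```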
diff --git a/api/service/stagedstreamsync/downloader.go b/api/service/stagedstreamsync/downloader.go
index 3711048955..9d564b016c 100644
--- a/api/service/stagedstreamsync/downloader.go
+++ b/api/service/stagedstreamsync/downloader.go
@@ -285,4 +285,5 @@ func (d *Downloader) loop() {
return
}
}
+
}
diff --git a/api/service/stagedstreamsync/helpers.go b/api/service/stagedstreamsync/helpers.go
index 75e504214f..96c1c22b07 100644
--- a/api/service/stagedstreamsync/helpers.go
+++ b/api/service/stagedstreamsync/helpers.go
@@ -73,6 +73,27 @@ func checkGetBlockByHashesResult(blocks []*types.Block, hashes []common.Hash) er
return nil
}
+func getBlockByMaxVote(blocks []*types.Block) (*types.Block, error) {
+ hashesVote := make(map[common.Hash]int)
+ maxVote := int(-1)
+ maxVotedBlockIndex := int(0)
+
+ for i, block := range blocks {
+ if block == nil {
+ continue
+ }
+ hashesVote[block.Header().Hash()]++
+ if hashesVote[block.Header().Hash()] > maxVote {
+ maxVote = hashesVote[block.Header().Hash()]
+ maxVotedBlockIndex = i
+ }
+ }
+ if maxVote < 0 {
+ return nil, ErrInvalidBlockBytes
+ }
+ return blocks[maxVotedBlockIndex], nil
+}
+
func countHashMaxVote(m map[sttypes.StreamID]common.Hash, whitelist map[sttypes.StreamID]struct{}) (common.Hash, map[sttypes.StreamID]struct{}) {
var (
voteM = make(map[common.Hash]int)
diff --git a/api/service/stagedstreamsync/proof.go b/api/service/stagedstreamsync/proof.go
new file mode 100644
index 0000000000..216d797d45
--- /dev/null
+++ b/api/service/stagedstreamsync/proof.go
@@ -0,0 +1,146 @@
+package stagedstreamsync
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+// ProofSet stores a set of trie nodes. It implements trie.Database and can also
+// act as a cache for another trie.Database.
+type ProofSet struct {
+ nodes map[string][]byte
+ order []string
+
+ dataSize int
+ lock sync.RWMutex
+}
+
+// NewProofSet creates an empty node set
+func NewProofSet() *ProofSet {
+ return &ProofSet{
+ nodes: make(map[string][]byte),
+ }
+}
+
+// Put stores a new node in the set
+func (db *ProofSet) Put(key []byte, value []byte) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ if _, ok := db.nodes[string(key)]; ok {
+ return nil
+ }
+ keystr := string(key)
+
+ db.nodes[keystr] = common.CopyBytes(value)
+ db.order = append(db.order, keystr)
+ db.dataSize += len(value)
+
+ return nil
+}
+
+// Delete removes a node from the set
+func (db *ProofSet) Delete(key []byte) error {
+ db.lock.Lock()
+ defer db.lock.Unlock()
+
+ delete(db.nodes, string(key))
+ return nil
+}
+
+// Get returns a stored node
+func (db *ProofSet) Get(key []byte) ([]byte, error) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ if entry, ok := db.nodes[string(key)]; ok {
+ return entry, nil
+ }
+ return nil, errors.New("not found")
+}
+
+// Has returns true if the node set contains the given key
+func (db *ProofSet) Has(key []byte) (bool, error) {
+ _, err := db.Get(key)
+ return err == nil, nil
+}
+
+// KeyCount returns the number of nodes in the set
+func (db *ProofSet) KeyCount() int {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ return len(db.nodes)
+}
+
+// DataSize returns the aggregated data size of nodes in the set
+func (db *ProofSet) DataSize() int {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ return db.dataSize
+}
+
+// List converts the node set to a ProofList
+func (db *ProofSet) List() ProofList {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ var values ProofList
+ for _, key := range db.order {
+ values = append(values, db.nodes[key])
+ }
+ return values
+}
+
+// Store writes the contents of the set to the given database
+func (db *ProofSet) Store(target ethdb.KeyValueWriter) {
+ db.lock.RLock()
+ defer db.lock.RUnlock()
+
+ for key, value := range db.nodes {
+ target.Put([]byte(key), value)
+ }
+}
+
+// ProofList stores an ordered list of trie nodes. It implements ethdb.KeyValueWriter.
+type ProofList []rlp.RawValue
+
+// Store writes the contents of the list to the given database
+func (n ProofList) Store(db ethdb.KeyValueWriter) {
+ for _, node := range n {
+ db.Put(crypto.Keccak256(node), node)
+ }
+}
+
+// Set converts the node list to a ProofSet
+func (n ProofList) Set() *ProofSet {
+ db := NewProofSet()
+ n.Store(db)
+ return db
+}
+
+// Put stores a new node at the end of the list
+func (n *ProofList) Put(key []byte, value []byte) error {
+ *n = append(*n, value)
+ return nil
+}
+
+// Delete panics as there's no reason to remove a node from the list.
+func (n *ProofList) Delete(key []byte) error {
+ panic("not supported")
+}
+
+// DataSize returns the aggregated data size of nodes in the list
+func (n ProofList) DataSize() int {
+ var size int
+ for _, node := range n {
+ size += len(node)
+ }
+ return size
+}
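Note: ProofSet and ProofList are two views over the same proof nodes: the list keeps insertion order (handy for transporting a range proof), while the set keys each node by its keccak256 hash for lookup during verification. A small usage sketch, assuming the raw proof nodes arrive as byte slices:

```go
// Illustrative only: gather proof nodes into an ordered list, then expose
// them as a keyed set.
var proof ProofList
for _, node := range rawProofNodes { // rawProofNodes is an assumed [][]byte input
	proof = append(proof, node)
}
set := proof.Set() // stores each node under crypto.Keccak256(node)
fmt.Println("proof nodes:", set.KeyCount(), "total bytes:", set.DataSize())
```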
diff --git a/api/service/stagedstreamsync/range.go b/api/service/stagedstreamsync/range.go
new file mode 100644
index 0000000000..d05a92ed40
--- /dev/null
+++ b/api/service/stagedstreamsync/range.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stagedstreamsync
+
+import (
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/holiman/uint256"
+)
+
+// hashSpace is the total size of the 256 bit hash space for accounts.
+var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)
+
+// hashRange is a utility to handle ranges of hashes, Split up the
+// hash-space into sections, and 'walk' over the sections
+type hashRange struct {
+ current *uint256.Int
+ step *uint256.Int
+}
+
+// newHashRange creates a new hashRange, initiated at the start position,
+// and with the step set to fill the desired 'num' chunks
+func newHashRange(start common.Hash, num uint64) *hashRange {
+ left := new(big.Int).Sub(hashSpace, start.Big())
+ step := new(big.Int).Div(
+ new(big.Int).Add(left, new(big.Int).SetUint64(num-1)),
+ new(big.Int).SetUint64(num),
+ )
+ step256 := new(uint256.Int)
+ step256.SetFromBig(step)
+
+ return &hashRange{
+ current: new(uint256.Int).SetBytes32(start[:]),
+ step: step256,
+ }
+}
+
+// Next pushes the hash range to the next interval.
+func (r *hashRange) Next() bool {
+ next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
+ if overflow {
+ return false
+ }
+ r.current = next
+ return true
+}
+
+// Start returns the first hash in the current interval.
+func (r *hashRange) Start() common.Hash {
+ return r.current.Bytes32()
+}
+
+// End returns the last hash in the current interval.
+func (r *hashRange) End() common.Hash {
+ // If the end overflows (non divisible range), return a shorter interval
+ next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
+ if overflow {
+ return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+ }
+ return next.SubUint64(next, 1).Bytes32()
+}
+
+// incHash returns the next hash, in lexicographical order (a.k.a plus one)
+func incHash(h common.Hash) common.Hash {
+ var a uint256.Int
+ a.SetBytes32(h[:])
+ a.AddUint64(&a, 1)
+ return common.Hash(a.Bytes32())
+}
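Note: hashRange splits the remaining account hash space into roughly equal chunks so that concurrent account-range fetchers can each walk one section. A short sketch of enumerating the chunks, assuming four fetch tasks that start from the zero hash:

```go
// Illustrative only: divide the whole hash space into 4 chunks and print
// each chunk's [Start, End] interval.
r := newHashRange(common.Hash{}, 4)
for i := 0; i < 4; i++ {
	fmt.Printf("chunk %d: %x .. %x\n", i, r.Start(), r.End())
	if !r.Next() { // Next returns false once the next interval would overflow
		break
	}
}
```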
diff --git a/api/service/stagedstreamsync/receipt_download_manager.go b/api/service/stagedstreamsync/receipt_download_manager.go
new file mode 100644
index 0000000000..55d9490828
--- /dev/null
+++ b/api/service/stagedstreamsync/receipt_download_manager.go
@@ -0,0 +1,180 @@
+package stagedstreamsync
+
+import (
+ "sync"
+
+ "github.com/harmony-one/harmony/core/types"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/rs/zerolog"
+)
+
+type ReceiptDownloadDetails struct {
+ streamID sttypes.StreamID
+}
+
+type Received struct {
+ streamID sttypes.StreamID
+ block *types.Block
+ receipts types.Receipts
+}
+
+// receiptDownloadManager is the helper structure for get receipts request management
+type receiptDownloadManager struct {
+ chain blockChain
+ tx kv.RwTx
+
+ targetBN uint64
+ requesting map[uint64]struct{} // receipt numbers that have been assigned to workers but not received
+ processing map[uint64]struct{} // receipt numbers received but not yet inserted
+ retries *prioritizedNumbers // receipt numbers whose requests failed and will be retried
+ rdd map[uint64]ReceiptDownloadDetails // details about how this receipt was downloaded
+
+ received map[uint64]Received
+
+ logger zerolog.Logger
+ lock sync.Mutex
+}
+
+func newReceiptDownloadManager(tx kv.RwTx, chain blockChain, targetBN uint64, logger zerolog.Logger) *receiptDownloadManager {
+ return &receiptDownloadManager{
+ chain: chain,
+ tx: tx,
+ targetBN: targetBN,
+ requesting: make(map[uint64]struct{}),
+ processing: make(map[uint64]struct{}),
+ retries: newPrioritizedNumbers(),
+ rdd: make(map[uint64]ReceiptDownloadDetails),
+ received: make(map[uint64]Received),
+
+ logger: logger,
+ }
+}
+
+// GetNextBatch gets the next batch of receipt numbers
+func (rdm *receiptDownloadManager) GetNextBatch(curHeight uint64) []uint64 {
+ rdm.lock.Lock()
+ defer rdm.lock.Unlock()
+
+ cap := ReceiptsPerRequest
+
+ bns := rdm.getBatchFromRetries(cap, curHeight)
+ if len(bns) > 0 {
+ cap -= len(bns)
+ rdm.addBatchToRequesting(bns)
+ }
+
+ if rdm.availableForMoreTasks() {
+ addBNs := rdm.getBatchFromUnprocessed(cap, curHeight)
+ rdm.addBatchToRequesting(addBNs)
+ bns = append(bns, addBNs...)
+ }
+
+ return bns
+}
+
+// HandleRequestError handles the error result
+func (rdm *receiptDownloadManager) HandleRequestError(bns []uint64, err error) {
+ rdm.lock.Lock()
+ defer rdm.lock.Unlock()
+
+ // add requested receipt numbers to retries
+ for _, bn := range bns {
+ delete(rdm.requesting, bn)
+ rdm.retries.push(bn)
+ }
+}
+
+// HandleRequestResult handles get receipts result
+func (rdm *receiptDownloadManager) HandleRequestResult(bns []uint64, receivedReceipts []types.Receipts, receivedBlocks []*types.Block, streamID sttypes.StreamID) error {
+ rdm.lock.Lock()
+ defer rdm.lock.Unlock()
+
+ for i, bn := range bns {
+ delete(rdm.requesting, bn)
+ if !indexExists(receivedBlocks, i) || !indexExists(receivedReceipts, i) {
+ rdm.retries.push(bn)
+ } else {
+ rdm.processing[bn] = struct{}{}
+ rdm.rdd[bn] = ReceiptDownloadDetails{
+ streamID: streamID,
+ }
+ rdm.received[bn] = Received{
+ block: receivedBlocks[i],
+ receipts: receivedReceipts[i],
+ }
+ }
+ }
+ return nil
+}
+
+// SetDownloadDetails sets the download details for a batch of blocks
+func (rdm *receiptDownloadManager) SetDownloadDetails(bns []uint64, streamID sttypes.StreamID) error {
+ rdm.lock.Lock()
+ defer rdm.lock.Unlock()
+
+ for _, bn := range bns {
+ rdm.rdd[bn] = ReceiptDownloadDetails{
+ streamID: streamID,
+ }
+ }
+ return nil
+}
+
+// GetDownloadDetails returns the download details for a certain block number
+func (rdm *receiptDownloadManager) GetDownloadDetails(blockNumber uint64) (streamID sttypes.StreamID) {
+ rdm.lock.Lock()
+ defer rdm.lock.Unlock()
+
+ return rdm.rdd[blockNumber].streamID
+}
+
+// getBatchFromRetries gets the batch of receipt numbers to be re-requested from retries.
+func (rdm *receiptDownloadManager) getBatchFromRetries(cap int, fromBlockNumber uint64) []uint64 {
+ var (
+ requestBNs []uint64
+ )
+ for cnt := 0; cnt < cap; cnt++ {
+ bn := rdm.retries.pop()
+ if bn == 0 {
+ break // no more retries
+ }
+ if bn <= fromBlockNumber {
+ continue
+ }
+ requestBNs = append(requestBNs, bn)
+ }
+ return requestBNs
+}
+
+// getBatchFromUnprocessed returns a batch of receipt numbers to be requested from unprocessed.
+func (rdm *receiptDownloadManager) getBatchFromUnprocessed(cap int, curHeight uint64) []uint64 {
+ var (
+ requestBNs []uint64
+ )
+ bn := curHeight + 1
+ // TODO: this algorithm can be potentially optimized.
+ for cnt := 0; cnt < cap && bn <= rdm.targetBN; cnt++ {
+ for bn <= rdm.targetBN {
+ _, ok1 := rdm.requesting[bn]
+ _, ok2 := rdm.processing[bn]
+ if !ok1 && !ok2 {
+ requestBNs = append(requestBNs, bn)
+ bn++
+ break
+ }
+ bn++
+ }
+ }
+ return requestBNs
+}
+
+func (rdm *receiptDownloadManager) availableForMoreTasks() bool {
+ return len(rdm.requesting) < SoftQueueCap
+}
+
+func (rdm *receiptDownloadManager) addBatchToRequesting(bns []uint64) {
+ for _, bn := range bns {
+ rdm.requesting[bn] = struct{}{}
+ }
+}
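Note: the receipt download manager follows the same request/retry bookkeeping as the block download manager: a worker takes a batch of block numbers, fetches the matching receipts, and reports the outcome so failed numbers go back onto the retry queue. A compressed sketch of one worker step, where `fetch` stands in for the GetReceipts round trip implemented in stage_receipts.go:

```go
// Illustrative only: a single worker iteration against receiptDownloadManager.
func receiptWorkerStep(rdm *receiptDownloadManager, curHeight uint64,
	fetch func([]uint64) ([]types.Receipts, []*types.Block, sttypes.StreamID, error)) {

	batch := rdm.GetNextBatch(curHeight) // marks these numbers as "requesting"
	if len(batch) == 0 {
		return
	}
	receipts, blocks, stid, err := fetch(batch)
	if err != nil {
		rdm.HandleRequestError(batch, err) // push the batch back onto retries
		return
	}
	// results are stored in rdm.received, keyed by block number, for insertion
	rdm.HandleRequestResult(batch, receipts, blocks, stid)
}
```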
diff --git a/api/service/stagedstreamsync/sig_verify.go b/api/service/stagedstreamsync/sig_verify.go
index 649c6eaec1..cd7fc4f913 100644
--- a/api/service/stagedstreamsync/sig_verify.go
+++ b/api/service/stagedstreamsync/sig_verify.go
@@ -3,6 +3,7 @@ package stagedstreamsync
import (
"fmt"
+ "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/chain"
@@ -28,7 +29,7 @@ func verifyAndInsertBlocks(bc blockChain, blocks types.Blocks) (int, error) {
return len(blocks), nil
}
-func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error {
+func verifyBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error {
var (
sigBytes bls.SerializedSignature
bitmap []byte
@@ -53,7 +54,20 @@ func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*type
if err := bc.Engine().VerifyHeader(bc, block.Header(), true); err != nil {
return errors.Wrap(err, "[VerifyHeader]")
}
+
+ return nil
+}
+
+func verifyAndInsertBlock(bc blockChain, block *types.Block, nextBlocks ...*types.Block) error {
+ //verify block
+ if err := verifyBlock(bc, block, nextBlocks...); err != nil {
+ return err
+ }
+ // insert block
if _, err := bc.InsertChain(types.Blocks{block}, false); err != nil {
+ if errors.Is(err, core.ErrKnownBlock) {
+ return nil
+ }
return errors.Wrap(err, "[InsertChain]")
}
return nil
diff --git a/api/service/stagedstreamsync/stage.go b/api/service/stagedstreamsync/stage.go
index 48334a5e52..59602fe818 100644
--- a/api/service/stagedstreamsync/stage.go
+++ b/api/service/stagedstreamsync/stage.go
@@ -30,6 +30,25 @@ type StageHandler interface {
CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) error
}
+type RangeExecution uint32
+
+const (
+ LongRangeAndShortRange RangeExecution = iota // Both short range and long range
+ OnlyShortRange // only short range
+ OnlyLongRange // only long range
+ //OnlyEpochSync // only epoch sync
+)
+
+type ChainExecution uint32
+
+const (
+ AllChains ChainExecution = iota // Can execute for any shard
+ AllChainsExceptEpochChain // Can execute for any shard except epoch chain
+ OnlyBeaconNode // only for beacon node
+ OnlyEpochChain // only for epoch chain
+ OnlyShardChain // only for shard node (exclude beacon node and epoch chain)
+)
+
// Stage is a single sync stage in staged sync.
type Stage struct {
// ID of the sync stage. Should not be empty and should be unique. It is recommended to prefix it with reverse domain to avoid clashes (`com.example.my-stage`).
@@ -42,6 +61,10 @@ type Stage struct {
DisabledDescription string
// Disabled defines if the stage is disabled. It sets up when the stage is build by its `StageBuilder`.
Disabled bool
+ // RangeMode defines whether the stage is executed for long range sync, short range sync, or both
+ RangeMode RangeExecution
+ // ChainExecutionMode defines for which chains (shards) this stage is executed
+ ChainExecutionMode ChainExecution
}
// StageState is the state of the stage.
diff --git a/api/service/stagedstreamsync/stage_bodies.go b/api/service/stagedstreamsync/stage_bodies.go
index b5d92e3a1a..9fdf4681a1 100644
--- a/api/service/stagedstreamsync/stage_bodies.go
+++ b/api/service/stagedstreamsync/stage_bodies.go
@@ -6,6 +6,7 @@ import (
"sync"
"time"
+ "github.com/ethereum/go-ethereum/rlp"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
@@ -20,13 +21,14 @@ type StageBodies struct {
}
type StageBodiesCfg struct {
- bc core.BlockChain
- db kv.RwDB
- blockDBs []kv.RwDB
- concurrency int
- protocol syncProtocol
- isBeacon bool
- logProgress bool
+ bc core.BlockChain
+ db kv.RwDB
+ blockDBs []kv.RwDB
+ concurrency int
+ protocol syncProtocol
+ isBeacon bool
+ extractReceiptHashes bool
+ logProgress bool
}
func NewStageBodies(cfg StageBodiesCfg) *StageBodies {
@@ -35,15 +37,16 @@ func NewStageBodies(cfg StageBodiesCfg) *StageBodies {
}
}
-func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, logProgress bool) StageBodiesCfg {
+func NewStageBodiesCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, extractReceiptHashes bool, logProgress bool) StageBodiesCfg {
return StageBodiesCfg{
- bc: bc,
- db: db,
- blockDBs: blockDBs,
- concurrency: concurrency,
- protocol: protocol,
- isBeacon: isBeacon,
- logProgress: logProgress,
+ bc: bc,
+ db: db,
+ blockDBs: blockDBs,
+ concurrency: concurrency,
+ protocol: protocol,
+ isBeacon: isBeacon,
+ extractReceiptHashes: extractReceiptHashes,
+ logProgress: logProgress,
}
}
@@ -67,7 +70,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
}
maxHeight := s.state.status.targetBN
- currentHead := b.configs.bc.CurrentBlock().NumberU64()
+ currentHead := s.state.CurrentBlockNumber()
if currentHead >= maxHeight {
return nil
}
@@ -118,7 +121,7 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
for i := 0; i != s.state.config.Concurrency; i++ {
wg.Add(1)
- go b.runBlockWorkerLoop(ctx, s.state.gbm, &wg, i, startTime)
+ go b.runBlockWorkerLoop(ctx, s.state.gbm, &wg, i, s, startTime)
}
wg.Wait()
@@ -133,9 +136,9 @@ func (b *StageBodies) Exec(ctx context.Context, firstCycle bool, invalidBlockRev
}
// runBlockWorkerLoop creates a work loop for download blocks
-func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time) {
+func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownloadManager, wg *sync.WaitGroup, loopID int, s *StageState, startTime time.Time) {
- currentBlock := int(b.configs.bc.CurrentBlock().NumberU64())
+ currentBlock := int(s.state.CurrentBlockNumber())
defer wg.Done()
@@ -145,7 +148,8 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload
return
default:
}
- batch := gbm.GetNextBatch()
+ curHeight := s.state.CurrentBlockNumber()
+ batch := gbm.GetNextBatch(curHeight)
if len(batch) == 0 {
select {
case <-ctx.Done():
@@ -204,6 +208,34 @@ func (b *StageBodies) runBlockWorkerLoop(ctx context.Context, gbm *blockDownload
}
}
+func (b *StageBodies) verifyBlockAndExtractReceiptsData(batchBlockBytes [][]byte, batchSigBytes [][]byte, s *StageState) error {
+ var block *types.Block
+ for i := uint64(0); i < uint64(len(batchBlockBytes)); i++ {
+ blockBytes := batchBlockBytes[i]
+ sigBytes := batchSigBytes[i]
+ if blockBytes == nil {
+ continue
+ }
+ if err := rlp.DecodeBytes(blockBytes, &block); err != nil {
+ utils.Logger().Error().
+ Uint64("block number", i).
+ Msg("block size invalid")
+ return ErrInvalidBlockBytes
+ }
+ if sigBytes != nil {
+ block.SetCurrentCommitSig(sigBytes)
+ }
+
+ // if block.NumberU64() != i {
+ // return ErrInvalidBlockNumber
+ // }
+ if err := verifyBlock(b.configs.bc, block); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// redownloadBadBlock tries to redownload the bad block from other streams
func (b *StageBodies) redownloadBadBlock(ctx context.Context, s *StageState) error {
@@ -403,7 +435,7 @@ func (b *StageBodies) Revert(ctx context.Context, firstCycle bool, u *RevertStat
defer tx.Rollback()
}
// save progress
- currentHead := b.configs.bc.CurrentBlock().NumberU64()
+ currentHead := s.state.CurrentBlockNumber()
if err = s.Update(tx, currentHead); err != nil {
utils.Logger().Error().
Err(err).
diff --git a/api/service/stagedstreamsync/stage_epoch.go b/api/service/stagedstreamsync/stage_epoch.go
index e84b74f340..8129ce0db4 100644
--- a/api/service/stagedstreamsync/stage_epoch.go
+++ b/api/service/stagedstreamsync/stage_epoch.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/harmony-one/harmony/shard"
@@ -129,13 +130,20 @@ func (sr *StageEpoch) doShortRangeSyncForEpochSync(ctx context.Context, s *Stage
return 0, nil
}
- n, err := s.state.bc.InsertChain(blocks, true)
- numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n))
- if err != nil {
- utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block failed")
- sh.streamsFailed([]sttypes.StreamID{streamID}, "corrupted data")
- return n, err
+ n := 0
+ for _, block := range blocks {
+ _, err := s.state.bc.InsertChain([]*types.Block{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case err != nil:
+ utils.Logger().Info().Err(err).Int("blocks inserted", n).Msg("Insert block failed")
+ sh.streamsFailed([]sttypes.StreamID{streamID}, "corrupted data")
+ return n, err
+ default:
+ }
+ n++
}
+ numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n))
return n, nil
}
diff --git a/api/service/stagedstreamsync/stage_finish.go b/api/service/stagedstreamsync/stage_finish.go
index 0dfae53ae2..c94aa692bf 100644
--- a/api/service/stagedstreamsync/stage_finish.go
+++ b/api/service/stagedstreamsync/stage_finish.go
@@ -39,6 +39,11 @@ func (finish *StageFinish) Exec(ctx context.Context, firstCycle bool, invalidBlo
// TODO: prepare indices (useful for RPC) and finalize
+ // switch to Full Sync Mode if the states are synced
+ if s.state.status.statesSynced {
+ s.state.status.cycleSyncMode = FullSync
+ }
+
if useInternalTx {
if err := tx.Commit(); err != nil {
return err
diff --git a/api/service/stagedstreamsync/stage_heads.go b/api/service/stagedstreamsync/stage_heads.go
index c917884a36..bf0721aad7 100644
--- a/api/service/stagedstreamsync/stage_heads.go
+++ b/api/service/stagedstreamsync/stage_heads.go
@@ -53,7 +53,7 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock
maxHeight := s.state.status.targetBN
maxBlocksPerSyncCycle := uint64(1024) // TODO: should be in config -> s.state.MaxBlocksPerSyncCycle
- currentHeight := heads.configs.bc.CurrentBlock().NumberU64()
+ currentHeight := s.state.CurrentBlockNumber()
s.state.currentCycle.TargetHeight = maxHeight
targetHeight := uint64(0)
if errV := CreateView(ctx, heads.configs.db, tx, func(etx kv.Tx) (err error) {
@@ -89,6 +89,14 @@ func (heads *StageHeads) Exec(ctx context.Context, firstCycle bool, invalidBlock
targetHeight = currentHeight + maxBlocksPerSyncCycle
}
+ // check pivot: if the chain hasn't reached the pivot yet
+ if s.state.status.cycleSyncMode != FullSync && s.state.status.pivotBlock != nil {
+ // set target height on the pivot block
+ if !s.state.status.statesSynced && targetHeight > s.state.status.pivotBlock.NumberU64() {
+ targetHeight = s.state.status.pivotBlock.NumberU64()
+ }
+ }
+
s.state.currentCycle.TargetHeight = targetHeight
if err := s.Update(tx, targetHeight); err != nil {
diff --git a/api/service/stagedstreamsync/stage_receipts.go b/api/service/stagedstreamsync/stage_receipts.go
new file mode 100644
index 0000000000..78e8e089cd
--- /dev/null
+++ b/api/service/stagedstreamsync/stage_receipts.go
@@ -0,0 +1,404 @@
+package stagedstreamsync
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/internal/utils"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/harmony-one/harmony/shard"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/pkg/errors"
+)
+
+type StageReceipts struct {
+ configs StageReceiptsCfg
+}
+
+type StageReceiptsCfg struct {
+ bc core.BlockChain
+ db kv.RwDB
+ blockDBs []kv.RwDB
+ concurrency int
+ protocol syncProtocol
+ isBeacon bool
+ logProgress bool
+}
+
+func NewStageReceipts(cfg StageReceiptsCfg) *StageReceipts {
+ return &StageReceipts{
+ configs: cfg,
+ }
+}
+
+func NewStageReceiptsCfg(bc core.BlockChain, db kv.RwDB, blockDBs []kv.RwDB, concurrency int, protocol syncProtocol, isBeacon bool, logProgress bool) StageReceiptsCfg {
+ return StageReceiptsCfg{
+ bc: bc,
+ db: db,
+ blockDBs: blockDBs,
+ concurrency: concurrency,
+ protocol: protocol,
+ isBeacon: isBeacon,
+ logProgress: logProgress,
+ }
+}
+
+// Exec progresses receipts stage in the forward direction
+func (r *StageReceipts) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+
+ // only execute this stage in fast/snap sync mode
+ if s.state.status.cycleSyncMode == FullSync {
+ return nil
+ }
+
+ // shouldn't execute for epoch chain
+ if r.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+ return nil
+ }
+
+ useInternalTx := tx == nil
+
+ if invalidBlockRevert {
+ return nil
+ }
+
+ // for short range sync, skip this stage
+ if !s.state.initSync {
+ return nil
+ }
+
+ maxHeight := s.state.status.targetBN
+ currentHead := s.state.CurrentBlockNumber()
+ if currentHead >= maxHeight {
+ return nil
+ }
+ currProgress := uint64(0)
+ targetHeight := s.state.currentCycle.TargetHeight
+
+ if errV := CreateView(ctx, r.configs.db, tx, func(etx kv.Tx) error {
+ if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+ return err
+ }
+ return nil
+ }); errV != nil {
+ return errV
+ }
+
+ if currProgress == 0 {
+ currProgress = currentHead
+ }
+
+ if currProgress >= targetHeight {
+ return nil
+ }
+
+ // size := uint64(0)
+ startTime := time.Now()
+ // startBlock := currProgress
+
+ if r.configs.logProgress {
+ fmt.Print("\033[s") // save the cursor position
+ }
+
+ if useInternalTx {
+ var err error
+ tx, err = r.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ for {
+ // if there is nothing more to download, break the loop
+ curBn := s.state.CurrentBlockNumber()
+ if curBn == targetHeight {
+ break
+ }
+
+ // calculate the block numbers range to download
+ toBn := curBn + uint64(ReceiptsPerRequest*s.state.config.Concurrency)
+ if toBn > targetHeight {
+ toBn = targetHeight
+ }
+
+ // Fetch receipts from connected peers
+ rdm := newReceiptDownloadManager(tx, r.configs.bc, toBn, s.state.logger)
+
+ // Setup workers to fetch blocks from remote node
+ var wg sync.WaitGroup
+
+ for i := 0; i < s.state.config.Concurrency; i++ {
+ wg.Add(1)
+ go func() {
+ // prepare db transactions
+ txs := make([]kv.RwTx, r.configs.concurrency)
+ for i := 0; i < r.configs.concurrency; i++ {
+ txs[i], err = r.configs.blockDBs[i].BeginRw(ctx)
+ if err != nil {
+ return
+ }
+ }
+ // rollback the transactions after worker loop
+ defer func() {
+ for i := 0; i < r.configs.concurrency; i++ {
+ txs[i].Rollback()
+ }
+ }()
+
+ r.runReceiptWorkerLoop(ctx, rdm, &wg, s, txs, startTime)
+ }()
+ }
+ wg.Wait()
+ // insert all downloaded blocks and receipts to chain
+ if err := r.insertBlocksAndReceipts(ctx, rdm, toBn, s); err != nil {
+ utils.Logger().Err(err).Msg(WrapStagedSyncMsg("InsertReceiptChain failed"))
+ }
+ }
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *StageReceipts) insertBlocksAndReceipts(ctx context.Context, rdm *receiptDownloadManager, toBn uint64, s *StageState) error {
+ if len(rdm.received) == 0 {
+ return nil
+ }
+ var (
+ bns []uint64
+ blocks []*types.Block
+ receipts []types.Receipts
+ streamIDs []sttypes.StreamID
+ )
+ // populate blocks and receipts into separate arrays;
+ // iterating by block number keeps both arrays sorted
+ for bn := s.state.CurrentBlockNumber() + 1; bn <= toBn; bn++ {
+ if received, ok := rdm.received[bn]; !ok {
+ return errors.New("some blocks are missing")
+ } else {
+ bns = append(bns, bn)
+ blocks = append(blocks, received.block)
+ receipts = append(receipts, received.receipts)
+ streamIDs = append(streamIDs, received.streamID)
+ }
+ }
+ // insert sorted blocks and receipts to chain
+ if inserted, err := r.configs.bc.InsertReceiptChain(blocks, receipts); err != nil {
+ utils.Logger().Err(err).
+ Interface("streams", streamIDs).
+ Interface("block numbers", bns).
+ Msg(WrapStagedSyncMsg("InsertReceiptChain failed"))
+ rdm.HandleRequestError(bns, err)
+ return fmt.Errorf("InsertReceiptChain failed: %s", err.Error())
+ } else {
+ if inserted != len(blocks) {
+ utils.Logger().Warn().
+ Interface("block numbers", bns).
+ Int("inserted", inserted).
+ Int("blocks to insert", len(blocks)).
+ Msg(WrapStagedSyncMsg("InsertReceiptChain couldn't insert all downloaded blocks/receipts"))
+ }
+ }
+ return nil
+}
+
+// runReceiptWorkerLoop creates a work loop for downloading receipts
+func (r *StageReceipts) runReceiptWorkerLoop(ctx context.Context, rdm *receiptDownloadManager, wg *sync.WaitGroup, s *StageState, txs []kv.RwTx, startTime time.Time) {
+
+ currentBlock := int(s.state.CurrentBlockNumber())
+ gbm := s.state.gbm
+
+ defer wg.Done()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ // get next batch of block numbers
+ curHeight := s.state.CurrentBlockNumber()
+ batch := rdm.GetNextBatch(curHeight)
+ if len(batch) == 0 {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(100 * time.Millisecond):
+ return
+ }
+ }
+ // retrieve corresponding blocks from cache db
+ var hashes []common.Hash
+ var blocks []*types.Block
+
+ for _, bn := range batch {
+ blkKey := marshalData(bn)
+ loopID, _, errBDD := gbm.GetDownloadDetails(bn)
+ if errBDD != nil {
+ utils.Logger().Warn().
+ Err(errBDD).
+ Interface("block numbers", bn).
+ Msg(WrapStagedSyncMsg("get block download details failed"))
+ return
+ }
+ blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey)
+ if err != nil {
+ return
+ }
+ sigBytes, err := txs[loopID].GetOne(BlockSignaturesBucket, blkKey)
+ if err != nil {
+ return
+ }
+ sz := len(blockBytes)
+ if sz <= 1 {
+ return
+ }
+ var block *types.Block
+ if err := rlp.DecodeBytes(blockBytes, &block); err != nil {
+ return
+ }
+ if sigBytes != nil {
+ block.SetCurrentCommitSig(sigBytes)
+ }
+ if block.NumberU64() != bn {
+ return
+ }
+ if block.Header().ReceiptHash() == emptyHash {
+ return
+ }
+ // receiptHash := s.state.currentCycle.ReceiptHashes[bn]
+ gbm.SetRootHash(bn, block.Header().Root())
+ hashes = append(hashes, block.Header().Hash())
+ blocks = append(blocks, block)
+ }
+
+ // download receipts
+ receipts, stid, err := r.downloadReceipts(ctx, hashes)
+ if err != nil {
+ if !errors.Is(err, context.Canceled) {
+ r.configs.protocol.StreamFailed(stid, "downloadReceipts failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Interface("block numbers", batch).
+ Msg(WrapStagedSyncMsg("downloadReceipts failed"))
+ err = errors.Wrap(err, "request error")
+ rdm.HandleRequestError(batch, err)
+ } else {
+ // handle request result
+ rdm.HandleRequestResult(batch, receipts, blocks, stid)
+ // log progress
+ if r.configs.logProgress {
+ //calculating block download speed
+ dt := time.Now().Sub(startTime).Seconds()
+ speed := float64(0)
+ if dt > 0 {
+ speed = float64(len(rdm.rdd)) / dt
+ }
+ blockReceiptSpeed := fmt.Sprintf("%.2f", speed)
+
+ fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
+ fmt.Println("downloaded blocks and receipts:", currentBlock+len(rdm.rdd), "/", int(rdm.targetBN), "(", blockReceiptSpeed, "BlocksAndReceipts/s", ")")
+ }
+ }
+ }
+}
+
+func (r *StageReceipts) downloadReceipts(ctx context.Context, hs []common.Hash) ([]types.Receipts, sttypes.StreamID, error) {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ receipts, stid, err := r.configs.protocol.GetReceipts(ctx, hs)
+ if err != nil {
+ return nil, stid, err
+ }
+ if err := validateGetReceiptsResult(hs, receipts); err != nil {
+ return nil, stid, err
+ }
+ return receipts, stid, nil
+}
+
+func validateGetReceiptsResult(requested []common.Hash, result []types.Receipts) error {
+ // TODO: validate each receipt here
+
+ return nil
+}
+
+func (r *StageReceipts) saveProgress(ctx context.Context, s *StageState, progress uint64, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ var err error
+ tx, err = r.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ // save progress
+ if err = s.Update(tx, progress); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Msgf("[STAGED_SYNC] saving progress for receipt stage failed")
+ return ErrSavingBodiesProgressFail
+ }
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *StageReceipts) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = r.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if err = u.Done(tx); err != nil {
+ return err
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
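+// CleanUp performs the post-cycle cleanup for the receipts stage.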
+func (r *StageReceipts) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = r.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/api/service/stagedstreamsync/stage_short_range.go b/api/service/stagedstreamsync/stage_short_range.go
index ce6cdf36bc..d771cd6606 100644
--- a/api/service/stagedstreamsync/stage_short_range.go
+++ b/api/service/stagedstreamsync/stage_short_range.go
@@ -136,6 +136,8 @@ func (sr *StageShortRange) doShortRangeSync(ctx context.Context, s *StageState)
sh.streamsFailed(whitelist, "remote nodes cannot provide blocks with target hashes")
}
+ utils.Logger().Info().Int("num blocks", len(blocks)).Msg("getBlockByHashes result")
+
n, err := verifyAndInsertBlocks(sr.configs.bc, blocks)
numBlocksInsertedShortRangeHistogramVec.With(s.state.promLabels()).Observe(float64(n))
if err != nil {
diff --git a/api/service/stagedstreamsync/stage_state.go b/api/service/stagedstreamsync/stage_states.go
similarity index 95%
rename from api/service/stagedstreamsync/stage_state.go
rename to api/service/stagedstreamsync/stage_states.go
index b8dfb18288..1b668786cf 100644
--- a/api/service/stagedstreamsync/stage_state.go
+++ b/api/service/stagedstreamsync/stage_states.go
@@ -53,6 +53,11 @@ func NewStageStatesCfg(
// Exec progresses States stage in the forward direction
func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+ // only execute this stage in full sync mode
+ if s.state.status.cycleSyncMode != FullSync {
+ return nil
+ }
+
// for short range sync, skip this step
if !s.state.initSync {
return nil
@@ -64,11 +69,11 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
}
maxHeight := s.state.status.targetBN
- currentHead := stg.configs.bc.CurrentBlock().NumberU64()
+ currentHead := s.state.CurrentBlockNumber()
if currentHead >= maxHeight {
return nil
}
- currProgress := stg.configs.bc.CurrentBlock().NumberU64()
+ currProgress := currentHead
targetHeight := s.state.currentCycle.TargetHeight
if currProgress >= targetHeight {
return nil
@@ -110,7 +115,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
for i := currProgress + 1; i <= targetHeight; i++ {
blkKey := marshalData(i)
- loopID, streamID := gbm.GetDownloadDetails(i)
+ loopID, streamID, errBDD := gbm.GetDownloadDetails(i)
+ if errBDD != nil {
+ return errBDD
+ }
blockBytes, err := txs[loopID].GetOne(BlocksBucket, blkKey)
if err != nil {
@@ -157,6 +165,10 @@ func (stg *StageStates) Exec(ctx context.Context, firstCycle bool, invalidBlockR
return ErrInvalidBlockNumber
}
+ if stg.configs.bc.HasBlock(block.Hash(), block.NumberU64()) {
+ continue
+ }
+
if err := verifyAndInsertBlock(stg.configs.bc, block); err != nil {
stg.configs.logger.Warn().Err(err).Uint64("cycle target block", targetHeight).
Uint64("block number", block.NumberU64()).
diff --git a/api/service/stagedstreamsync/stage_statesync.go b/api/service/stagedstreamsync/stage_statesync.go
new file mode 100644
index 0000000000..3ce733f41f
--- /dev/null
+++ b/api/service/stagedstreamsync/stage_statesync.go
@@ -0,0 +1,317 @@
+package stagedstreamsync
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/internal/utils"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/harmony-one/harmony/shard"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/rs/zerolog"
+)
+
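+// StageStateSync is the sync stage that downloads the world state (trie nodes
+// and contract byte codes) of the pivot block in fast/snap sync mode.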
+type StageStateSync struct {
+ configs StageStateSyncCfg
+}
+
+type StageStateSyncCfg struct {
+ bc core.BlockChain
+ db kv.RwDB
+ concurrency int
+ protocol syncProtocol
+ logger zerolog.Logger
+ logProgress bool
+}
+
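+// NewStageStateSync creates a new state sync stage from the given configuration.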
+func NewStageStateSync(cfg StageStateSyncCfg) *StageStateSync {
+ return &StageStateSync{
+ configs: cfg,
+ }
+}
+
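+// NewStageStateSyncCfg builds the configuration for the state sync stage.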
+func NewStageStateSyncCfg(bc core.BlockChain,
+ db kv.RwDB,
+ concurrency int,
+ protocol syncProtocol,
+ logger zerolog.Logger,
+ logProgress bool) StageStateSyncCfg {
+
+ return StageStateSyncCfg{
+ bc: bc,
+ db: db,
+ concurrency: concurrency,
+ protocol: protocol,
+ logger: logger,
+ logProgress: logProgress,
+ }
+}
+
+// Exec progresses the state sync stage in the forward direction
+func (sss *StageStateSync) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+
+ // for short range sync, skip this step
+ if !s.state.initSync {
+ return nil
+ }
+
+ // shouldn't execute for epoch chain
+ if sss.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+ return nil
+ }
+
+	// only execute this stage in fast/snap sync mode and once we reach the pivot block
+ if s.state.status.pivotBlock == nil ||
+ s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
+ s.state.status.statesSynced {
+ return nil
+ }
+
+ // maxHeight := s.state.status.targetBN
+ // currentHead := s.state.CurrentBlockNumber()
+ // if currentHead >= maxHeight {
+ // return nil
+ // }
+ // currProgress := s.state.CurrentBlockNumber()
+ // targetHeight := s.state.currentCycle.TargetHeight
+
+ // if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
+ // if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+ // return err
+ // }
+ // return nil
+ // }); errV != nil {
+ // return errV
+ // }
+
+ // if currProgress >= targetHeight {
+ // return nil
+ // }
+ useInternalTx := tx == nil
+ if useInternalTx {
+ var err error
+ tx, err = sss.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ // isLastCycle := targetHeight >= maxHeight
+ startTime := time.Now()
+
+ if sss.configs.logProgress {
+ fmt.Print("\033[s") // save the cursor position
+ }
+
+ // Fetch states from neighbors
+ // pivotRootHash := s.state.status.pivotBlock.Root()
+ currentBlockRootHash := s.state.bc.CurrentFastBlock().Root()
+ sdm := newStateDownloadManager(tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
+ sdm.setRootHash(currentBlockRootHash)
+ var wg sync.WaitGroup
+ for i := 0; i < s.state.config.Concurrency; i++ {
+ wg.Add(1)
+ go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+ }
+ wg.Wait()
+
+ // insert block
+ if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil {
+ sss.configs.logger.Warn().Err(err).
+ Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
+ Msg(WrapStagedSyncMsg("insert pivot block failed"))
+		// TODO: panic("pivot block failed to insert into chain.")
+ return err
+ }
+
+ // states should be fully synced in this stage
+ s.state.status.statesSynced = true
+
+ /*
+ gbm := s.state.gbm
+
+ // Setup workers to fetch states from remote node
+ var wg sync.WaitGroup
+ curHeight := s.state.CurrentBlockNumber()
+
+ for bn := curHeight + 1; bn <= gbm.targetBN; bn++ {
+ root := gbm.GetRootHash(bn)
+ if root == emptyHash {
+ continue
+ }
+ sdm.setRootHash(root)
+ for i := 0; i < s.state.config.Concurrency; i++ {
+ wg.Add(1)
+ go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+ }
+ wg.Wait()
+ }
+ */
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// runStateWorkerLoop creates a worker loop for downloading states
+func (sss *StageStateSync) runStateWorkerLoop(ctx context.Context, sdm *StateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) {
+
+ defer wg.Done()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ nodes, paths, codes, err := sdm.GetNextBatch()
+ if len(nodes)+len(codes) == 0 || err != nil {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(100 * time.Millisecond):
+ return
+ }
+ }
+ data, stid, err := sss.downloadStates(ctx, nodes, codes)
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "downloadStates failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("downloadStates failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(codes, paths, stid, err)
+ } else if data == nil || len(data) == 0 {
+ utils.Logger().Warn().
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("downloadStates failed, received empty data bytes"))
+ err := errors.New("downloadStates received empty data bytes")
+ sdm.HandleRequestError(codes, paths, stid, err)
+ } else {
+ sdm.HandleRequestResult(nodes, paths, data, loopID, stid)
+ if sss.configs.logProgress {
+				// calculate the state download speed
+				dt := time.Since(startTime).Seconds()
+ speed := float64(0)
+ if dt > 0 {
+ speed = float64(len(data)) / dt
+ }
+ stateDownloadSpeed := fmt.Sprintf("%.2f", speed)
+
+ fmt.Print("\033[u\033[K") // restore the cursor position and clear the line
+ fmt.Println("state download speed:", stateDownloadSpeed, "states/s")
+ }
+ }
+ }
+}
+
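+// downloadStates requests the given trie node and byte code hashes from a sync
+// stream peer, bounded by a 10-second timeout, and validates the returned data.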
+func (sss *StageStateSync) downloadStates(ctx context.Context, nodes []common.Hash, codes []common.Hash) ([][]byte, sttypes.StreamID, error) {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ hashes := append(codes, nodes...)
+ data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
+ if err != nil {
+ return nil, stid, err
+ }
+ if err := validateGetNodeDataResult(hashes, data); err != nil {
+ return nil, stid, err
+ }
+ return data, stid, nil
+}
+
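+// validateGetNodeDataResult checks that the number of delivered data blobs
+// matches the number of requested hashes.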
+func validateGetNodeDataResult(requested []common.Hash, result [][]byte) error {
+ if len(result) != len(requested) {
+ return fmt.Errorf("unexpected number of nodes delivered: %v / %v", len(result), len(requested))
+ }
+ return nil
+}
+
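+// insertChain is currently a no-op for this stage.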
+func (stg *StageStateSync) insertChain(gbm *blockDownloadManager,
+ protocol syncProtocol,
+ lbls prometheus.Labels,
+ targetBN uint64) {
+
+}
+
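+// saveProgress persists the state sync stage progress as the current block number.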
+func (stg *StageStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) {
+
+ useInternalTx := tx == nil
+ if useInternalTx {
+ var err error
+ tx, err = stg.configs.db.BeginRw(context.Background())
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ // save progress
+ if err = s.Update(tx, s.state.CurrentBlockNumber()); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Msgf("[STAGED_SYNC] saving progress for block States stage failed")
+ return ErrSaveStateProgressFail
+ }
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (stg *StageStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = stg.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if err = u.Done(tx); err != nil {
+ return err
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (stg *StageStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = stg.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/api/service/stagedstreamsync/stage_statesync_full.go b/api/service/stagedstreamsync/stage_statesync_full.go
new file mode 100644
index 0000000000..f5bd213af5
--- /dev/null
+++ b/api/service/stagedstreamsync/stage_statesync_full.go
@@ -0,0 +1,495 @@
+package stagedstreamsync
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/internal/utils"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/harmony-one/harmony/shard"
+ "github.com/pkg/errors"
+
+ //sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/rs/zerolog"
+)
+
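+// StageFullStateSync is the sync stage that downloads the full state of the
+// pivot block snap-style: account ranges, storage ranges, contract byte codes
+// and trie node heal requests.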
+type StageFullStateSync struct {
+ configs StageFullStateSyncCfg
+}
+
+type StageFullStateSyncCfg struct {
+ bc core.BlockChain
+ db kv.RwDB
+ concurrency int
+ protocol syncProtocol
+ logger zerolog.Logger
+ logProgress bool
+}
+
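+// NewStageFullStateSync creates a new full state sync stage from the given configuration.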
+func NewStageFullStateSync(cfg StageFullStateSyncCfg) *StageFullStateSync {
+ return &StageFullStateSync{
+ configs: cfg,
+ }
+}
+
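+// NewStageFullStateSyncCfg builds the configuration for the full state sync stage.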
+func NewStageFullStateSyncCfg(bc core.BlockChain,
+ db kv.RwDB,
+ concurrency int,
+ protocol syncProtocol,
+ logger zerolog.Logger,
+ logProgress bool) StageFullStateSyncCfg {
+
+ return StageFullStateSyncCfg{
+ bc: bc,
+ db: db,
+ concurrency: concurrency,
+ protocol: protocol,
+ logger: logger,
+ logProgress: logProgress,
+ }
+}
+
+// Exec progresses the full state sync stage in the forward direction
+func (sss *StageFullStateSync) Exec(ctx context.Context, firstCycle bool, invalidBlockRevert bool, s *StageState, reverter Reverter, tx kv.RwTx) (err error) {
+
+ // for short range sync, skip this step
+ if !s.state.initSync {
+ return nil
+ }
+
+ // shouldn't execute for epoch chain
+ if sss.configs.bc.ShardID() == shard.BeaconChainShardID && !s.state.isBeaconNode {
+ return nil
+ }
+
+ // if states are already synced, don't execute this stage
+ if s.state.status.statesSynced {
+ return
+ }
+
+	// only execute this stage in fast/snap sync mode and once we reach the pivot block
+ if s.state.status.pivotBlock == nil ||
+ s.state.CurrentBlockNumber() != s.state.status.pivotBlock.NumberU64() ||
+ s.state.status.statesSynced {
+ return nil
+ }
+
+ // maxHeight := s.state.status.targetBN
+ // currentHead := s.state.CurrentBlockNumber()
+ // if currentHead >= maxHeight {
+ // return nil
+ // }
+ // targetHeight := s.state.currentCycle.TargetHeight
+
+ currProgress := uint64(0)
+ if errV := CreateView(ctx, sss.configs.db, tx, func(etx kv.Tx) error {
+ if currProgress, err = s.CurrentStageProgress(etx); err != nil {
+ return err
+ }
+ return nil
+ }); errV != nil {
+ return errV
+ }
+ if currProgress >= s.state.status.pivotBlock.NumberU64() {
+ return nil
+ }
+
+ useInternalTx := tx == nil
+ if useInternalTx {
+ var err error
+ tx, err = sss.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ // isLastCycle := targetHeight >= maxHeight
+ startTime := time.Now()
+
+ if sss.configs.logProgress {
+ fmt.Print("\033[s") // save the cursor position
+ }
+
+ // Fetch states from neighbors
+ currentBlockRootHash := s.state.bc.CurrentFastBlock().Root()
+ scheme := sss.configs.bc.TrieDB().Scheme()
+ sdm := newFullStateDownloadManager(sss.configs.bc.ChainDb(), scheme, tx, sss.configs.bc, sss.configs.concurrency, s.state.logger)
+ sdm.setRootHash(currentBlockRootHash)
+
+ sdm.SyncStarted()
+ var wg sync.WaitGroup
+ for i := 0; i < s.state.config.Concurrency; i++ {
+ wg.Add(1)
+ go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+ }
+ wg.Wait()
+
+ // insert block
+ if err := sss.configs.bc.WriteHeadBlock(s.state.status.pivotBlock); err != nil {
+ sss.configs.logger.Warn().Err(err).
+ Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
+ Msg(WrapStagedSyncMsg("insert pivot block failed"))
+		// TODO: panic("pivot block failed to insert into chain.")
+ return err
+ }
+
+ // states should be fully synced in this stage
+ s.state.status.statesSynced = true
+
+ if err := sss.saveProgress(s, tx); err != nil {
+ sss.configs.logger.Warn().Err(err).
+ Uint64("pivot block number", s.state.status.pivotBlock.NumberU64()).
+ Msg(WrapStagedSyncMsg("save progress for statesync stage failed"))
+ }
+
+ /*
+ gbm := s.state.gbm
+
+ // Setup workers to fetch states from remote node
+ var wg sync.WaitGroup
+ curHeight := s.state.CurrentBlockNumber()
+
+ for bn := curHeight + 1; bn <= gbm.targetBN; bn++ {
+ root := gbm.GetRootHash(bn)
+ if root == emptyHash {
+ continue
+ }
+ sdm.setRootHash(root)
+ for i := 0; i < s.state.config.Concurrency; i++ {
+ wg.Add(1)
+ go sss.runStateWorkerLoop(ctx, sdm, &wg, i, startTime, s)
+ }
+ wg.Wait()
+ }
+ */
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// runStateWorkerLoop creates a worker loop for downloading states
+func (sss *StageFullStateSync) runStateWorkerLoop(ctx context.Context, sdm *FullStateDownloadManager, wg *sync.WaitGroup, loopID int, startTime time.Time, s *StageState) {
+
+ defer wg.Done()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ accountTasks, codes, storages, healtask, codetask, nTasks, err := sdm.GetNextBatch()
+ if nTasks == 0 {
+ utils.Logger().Debug().Msg("the state worker loop received no more tasks")
+ return
+ }
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ return
+ case <-time.After(100 * time.Millisecond):
+ return
+ }
+ }
+
+ if accountTasks != nil && len(accountTasks) > 0 {
+
+ task := accountTasks[0]
+ origin := task.Next
+ limit := task.Last
+ root := task.root
+ cap := task.cap
+ retAccounts, proof, stid, err := sss.configs.protocol.GetAccountRange(ctx, root, origin, limit, uint64(cap))
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "GetAccountRange failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetAccountRange failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ } else if retAccounts == nil || len(retAccounts) == 0 {
+ utils.Logger().Warn().
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetAccountRange failed, received empty accounts"))
+ //err := errors.New("GetAccountRange received empty slots")
+ //sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ if err := sdm.HandleAccountRequestResult(task, retAccounts, proof, origin[:], limit[:], loopID, stid); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetAccountRange handle result failed"))
+ err = errors.Wrap(err, "handle result error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+
+ } else if codes != nil && len(codes) > 0 {
+
+ stid, err := sss.downloadByteCodes(ctx, sdm, codes, loopID)
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "downloadByteCodes failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("downloadByteCodes failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+
+ } else if storages != nil && len(storages.accounts) > 0 {
+
+ root := storages.root
+ roots := storages.roots
+ accounts := storages.accounts
+ cap := storages.cap
+ origin := storages.origin
+ limit := storages.limit
+ mainTask := storages.mainTask
+ subTask := storages.subtask
+
+ slots, proof, stid, err := sss.configs.protocol.GetStorageRanges(ctx, root, accounts, origin, limit, uint64(cap))
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "GetStorageRanges failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetStorageRanges failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ } else if slots == nil || len(slots) == 0 {
+ utils.Logger().Warn().
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetStorageRanges failed, received empty slots"))
+ err := errors.New("GetStorageRanges received empty slots")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ if err := sdm.HandleStorageRequestResult(mainTask, subTask, accounts, roots, origin, limit, slots, proof, loopID, stid); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetStorageRanges handle result failed"))
+ err = errors.Wrap(err, "handle result error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+
+ } else {
+ // assign trie node Heal Tasks
+ if healtask != nil && len(healtask.hashes) > 0 {
+ root := healtask.root
+ task := healtask.task
+ hashes := healtask.hashes
+ pathsets := healtask.pathsets
+ paths := healtask.paths
+ bytes := healtask.bytes
+
+ nodes, stid, err := sss.configs.protocol.GetTrieNodes(ctx, root, pathsets, uint64(bytes))
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "GetTrieNodes failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetTrieNodes failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ } else if nodes == nil || len(nodes) == 0 {
+ utils.Logger().Warn().
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetTrieNodes failed, received empty nodes"))
+ err := errors.New("GetTrieNodes received empty nodes")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ if err := sdm.HandleTrieNodeHealRequestResult(task, paths, hashes, nodes, loopID, stid); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetTrieNodes handle result failed"))
+ err = errors.Wrap(err, "handle result error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ }
+
+ if codetask != nil && len(codetask.hashes) > 0 {
+ task := codetask.task
+ hashes := codetask.hashes
+ bytes := codetask.bytes
+ retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, hashes, uint64(bytes))
+ if err != nil {
+ if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) {
+ sss.configs.protocol.StreamFailed(stid, "GetByteCodes failed")
+ }
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetByteCodes failed"))
+ err = errors.Wrap(err, "request error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ } else if retCodes == nil || len(retCodes) == 0 {
+ utils.Logger().Warn().
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetByteCodes failed, received empty codes"))
+ err := errors.New("GetByteCodes received empty codes")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ if err := sdm.HandleBytecodeRequestResult(task, hashes, retCodes, loopID, stid); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Str("stream", string(stid)).
+ Msg(WrapStagedSyncMsg("GetByteCodes handle result failed"))
+ err = errors.Wrap(err, "handle result error")
+ sdm.HandleRequestError(accountTasks, codes, storages, healtask, codetask, stid, err)
+ return
+ }
+ }
+ }
+ }
+}
+
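+// downloadByteCodes fetches the byte codes for each code-task bundle from a
+// sync stream peer and hands the results to the download manager; on failure it
+// returns the ID of the stream that failed.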
+func (sss *StageFullStateSync) downloadByteCodes(ctx context.Context, sdm *FullStateDownloadManager, codeTasks []*byteCodeTasksBundle, loopID int) (stid sttypes.StreamID, err error) {
+ for _, codeTask := range codeTasks {
+ // try to get byte codes from remote peer
+ // if any of them failed, the stid will be the id of the failed stream
+ retCodes, stid, err := sss.configs.protocol.GetByteCodes(ctx, codeTask.hashes, uint64(codeTask.cap))
+ if err != nil {
+ return stid, err
+ }
+ if len(retCodes) == 0 {
+ return stid, errors.New("empty codes array")
+ }
+ if err = sdm.HandleBytecodeRequestResult(codeTask.task, codeTask.hashes, retCodes, loopID, stid); err != nil {
+ return stid, err
+ }
+ }
+ return
+}
+
+// func (sss *StageFullStateSync) downloadStates(ctx context.Context,
+// root common.Hash,
+// origin common.Hash,
+// accounts []*accountTask,
+// codes []common.Hash,
+// storages *storageTaskBundle) ([][]byte, sttypes.StreamID, error) {
+
+// ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+// defer cancel()
+
+// // if there is any account task, first we have to complete that
+// if len(accounts) > 0 {
+
+// }
+// // hashes := append(codes, nodes...)
+// // data, stid, err := sss.configs.protocol.GetNodeData(ctx, hashes)
+// // if err != nil {
+// // return nil, stid, err
+// // }
+// // if err := validateGetNodeDataResult(hashes, data); err != nil {
+// // return nil, stid, err
+// // }
+// return data, stid, nil
+// }
+
+func (stg *StageFullStateSync) insertChain(gbm *blockDownloadManager,
+ protocol syncProtocol,
+ lbls prometheus.Labels,
+ targetBN uint64) {
+
+}
+
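+// saveProgress persists the full state sync stage progress as the pivot block number.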
+func (stg *StageFullStateSync) saveProgress(s *StageState, tx kv.RwTx) (err error) {
+
+ useInternalTx := tx == nil
+ if useInternalTx {
+ var err error
+ tx, err = stg.configs.db.BeginRw(context.Background())
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ // save progress
+ if err = s.Update(tx, s.state.status.pivotBlock.NumberU64()); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Msgf("[STAGED_SYNC] saving progress for block States stage failed")
+ return ErrSaveStateProgressFail
+ }
+
+ if useInternalTx {
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (stg *StageFullStateSync) Revert(ctx context.Context, firstCycle bool, u *RevertState, s *StageState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = stg.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if err = u.Done(tx); err != nil {
+ return err
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (stg *StageFullStateSync) CleanUp(ctx context.Context, firstCycle bool, p *CleanUpState, tx kv.RwTx) (err error) {
+ useInternalTx := tx == nil
+ if useInternalTx {
+ tx, err = stg.configs.db.BeginRw(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+ }
+
+ if useInternalTx {
+ if err = tx.Commit(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/api/service/stagedstreamsync/staged_stream_sync.go b/api/service/stagedstreamsync/staged_stream_sync.go
index 1592186b52..1782068b29 100644
--- a/api/service/stagedstreamsync/staged_stream_sync.go
+++ b/api/service/stagedstreamsync/staged_stream_sync.go
@@ -16,6 +16,7 @@ import (
"github.com/harmony-one/harmony/internal/utils"
syncproto "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/harmony-one/harmony/shard"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
@@ -59,23 +60,22 @@ func (ib *InvalidBlock) addBadStream(bsID sttypes.StreamID) {
}
type StagedStreamSync struct {
- bc core.BlockChain
- consensus *consensus.Consensus
- isBeacon bool
- isExplorer bool
- db kv.RwDB
- protocol syncProtocol
- isBeaconNode bool
- gbm *blockDownloadManager // initialized when finished get block number
- lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus
- lastMileMux sync.Mutex
- inserted int
- config Config
- logger zerolog.Logger
- status *status //TODO: merge this with currentSyncCycle
- initSync bool // if sets to true, node start long range syncing
- UseMemDB bool
-
+ bc core.BlockChain
+ consensus *consensus.Consensus
+ isBeacon bool
+ isExplorer bool
+ db kv.RwDB
+ protocol syncProtocol
+ isBeaconNode bool
+ gbm *blockDownloadManager // initialized when finished get block number
+ lastMileBlocks []*types.Block // last mile blocks to catch up with the consensus
+ lastMileMux sync.Mutex
+ inserted int
+ config Config
+ logger zerolog.Logger
+ status *status //TODO: merge this with currentSyncCycle
+ initSync bool // if sets to true, node start long range syncing
+ UseMemDB bool
revertPoint *uint64 // used to run stages
prevRevertPoint *uint64 // used to get value from outside of staged sync after cycle (for example to notify RPCDaemon)
invalidBlock InvalidBlock
@@ -267,8 +267,18 @@ func New(
logger zerolog.Logger,
) *StagedStreamSync {
- revertStages := make([]*Stage, len(stagesList))
- for i, stageIndex := range DefaultRevertOrder {
+ forwardStages := make([]*Stage, len(StagesForwardOrder))
+ for i, stageIndex := range StagesForwardOrder {
+ for _, s := range stagesList {
+ if s.ID == stageIndex {
+ forwardStages[i] = s
+ break
+ }
+ }
+ }
+
+ revertStages := make([]*Stage, len(StagesRevertOrder))
+ for i, stageIndex := range StagesRevertOrder {
for _, s := range stagesList {
if s.ID == stageIndex {
revertStages[i] = s
@@ -276,8 +286,9 @@ func New(
}
}
}
- pruneStages := make([]*Stage, len(stagesList))
- for i, stageIndex := range DefaultCleanUpOrder {
+
+ pruneStages := make([]*Stage, len(StagesCleanUpOrder))
+ for i, stageIndex := range StagesCleanUpOrder {
for _, s := range stagesList {
if s.ID == stageIndex {
pruneStages[i] = s
@@ -306,7 +317,7 @@ func New(
inserted: 0,
config: config,
logger: logger,
- stages: stagesList,
+ stages: forwardStages,
currentStage: 0,
revertOrder: revertStages,
pruningOrder: pruneStages,
@@ -327,6 +338,18 @@ func (s *StagedStreamSync) doGetCurrentNumberRequest(ctx context.Context) (uint6
return bn, stid, nil
}
+// doGetBlockByNumberRequest returns block by its number and corresponding stream
+func (s *StagedStreamSync) doGetBlockByNumberRequest(ctx context.Context, bn uint64) (*types.Block, sttypes.StreamID, error) {
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ blocks, stid, err := s.protocol.GetBlocksByNumber(ctx, []uint64{bn}, syncproto.WithHighPriority())
+	if err != nil {
+		return nil, stid, err
+	}
+	if len(blocks) != 1 {
+		return nil, stid, errors.Errorf("doGetBlockByNumberRequest: expected 1 block, got %d", len(blocks))
+	}
+ return blocks[0], stid, nil
+}
+
// promLabels returns a prometheus labels for current shard id
func (s *StagedStreamSync) promLabels() prometheus.Labels {
sid := s.bc.ShardID()
@@ -383,6 +406,11 @@ func (s *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, firs
continue
}
+ // TODO: enable this part after make sure all works well
+ // if !s.canExecute(stage) {
+ // continue
+ // }
+
if err := s.runStage(ctx, stage, db, tx, firstCycle, s.invalidBlock.Active); err != nil {
utils.Logger().Error().
Err(err).
@@ -409,6 +437,55 @@ func (s *StagedStreamSync) Run(ctx context.Context, db kv.RwDB, tx kv.RwTx, firs
return nil
}
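+// canExecute reports whether the given stage should run for the current cycle,
+// based on its range mode (long/short range) and its chain execution mode
+// (beacon node, shard chain or epoch chain).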
+func (s *StagedStreamSync) canExecute(stage *Stage) bool {
+ // check range mode
+ if stage.RangeMode != LongRangeAndShortRange {
+ isLongRange := s.initSync
+ switch stage.RangeMode {
+ case OnlyLongRange:
+ if !isLongRange {
+ return false
+ }
+ case OnlyShortRange:
+ if isLongRange {
+ return false
+ }
+ default:
+ return false
+ }
+ }
+
+ // check chain execution
+ if stage.ChainExecutionMode != AllChains {
+ shardID := s.bc.ShardID()
+ isBeaconNode := s.isBeaconNode
+ isShardChain := shardID != shard.BeaconChainShardID
+ isEpochChain := shardID == shard.BeaconChainShardID && !isBeaconNode
+ switch stage.ChainExecutionMode {
+ case AllChainsExceptEpochChain:
+ if isEpochChain {
+ return false
+ }
+ case OnlyBeaconNode:
+ if !isBeaconNode {
+ return false
+ }
+ case OnlyShardChain:
+ if !isShardChain {
+ return false
+ }
+ case OnlyEpochChain:
+ if !isEpochChain {
+ return false
+ }
+ default:
+ return false
+ }
+ }
+
+ return true
+}
+
// CreateView creates a view for a given db
func CreateView(ctx context.Context, db kv.RwDB, tx kv.Tx, f func(tx kv.Tx) error) error {
if tx != nil {
@@ -472,7 +549,6 @@ func (s *StagedStreamSync) runStage(ctx context.Context, stage *Stage, db kv.RwD
if err != nil {
return err
}
-
if err = stage.Handler.Exec(ctx, firstCycle, invalidBlockRevert, stageState, s, tx); err != nil {
utils.Logger().Error().
Err(err).
@@ -636,10 +712,15 @@ func (ss *StagedStreamSync) addConsensusLastMile(bc core.BlockChain, cs *consens
if block == nil {
break
}
- if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil {
+ _, err := bc.InsertChain(types.Blocks{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case errors.Is(err, core.ErrNotLastBlockInEpoch):
+ case err != nil:
return errors.Wrap(err, "failed to InsertChain")
+ default:
+ hashes = append(hashes, block.Header().Hash())
}
- hashes = append(hashes, block.Header().Hash())
}
return nil
})
@@ -704,13 +785,16 @@ func (ss *StagedStreamSync) UpdateBlockAndStatus(block *types.Block, bc core.Blo
}
_, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
- if err != nil {
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case err != nil:
utils.Logger().Error().
Err(err).
Uint64("block number", block.NumberU64()).
Uint32("shard", block.ShardID()).
Msgf("[STAGED_STREAM_SYNC] UpdateBlockAndStatus: Error adding new block to blockchain")
return err
+ default:
}
utils.Logger().Info().
Uint64("blockHeight", block.NumberU64()).
diff --git a/api/service/stagedstreamsync/stages.go b/api/service/stagedstreamsync/stages.go
index 6a21fe7071..33f3b293b0 100644
--- a/api/service/stagedstreamsync/stages.go
+++ b/api/service/stagedstreamsync/stages.go
@@ -8,13 +8,16 @@ import (
type SyncStageID string
const (
- Heads SyncStageID = "Heads" // Heads are downloaded
- ShortRange SyncStageID = "ShortRange" // short range
- SyncEpoch SyncStageID = "SyncEpoch" // epoch sync
- BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
- States SyncStageID = "States" // will construct most recent state from downloaded blocks
- LastMile SyncStageID = "LastMile" // update blocks after sync and update last mile blocks as well
- Finish SyncStageID = "Finish" // Nominal stage after all other stages
+ Heads SyncStageID = "Heads" // Heads are downloaded
+ ShortRange SyncStageID = "ShortRange" // short range
+ SyncEpoch SyncStageID = "SyncEpoch" // epoch sync
+ BlockBodies SyncStageID = "BlockBodies" // Block bodies are downloaded, TxHash and UncleHash are getting verified
+ States SyncStageID = "States" // will construct most recent state from downloaded blocks
+ StateSync SyncStageID = "StateSync" // State sync
+ FullStateSync SyncStageID = "FullStateSync" // Full State Sync
+ Receipts SyncStageID = "Receipts" // Receipts
+ LastMile SyncStageID = "LastMile" // update blocks after sync and update last mile blocks as well
+ Finish SyncStageID = "Finish" // Nominal stage after all other stages
)
// GetStageName returns the stage name in string
diff --git a/api/service/stagedstreamsync/state_download_manager.go b/api/service/stagedstreamsync/state_download_manager.go
new file mode 100644
index 0000000000..51eccb8ec7
--- /dev/null
+++ b/api/service/stagedstreamsync/state_download_manager.go
@@ -0,0 +1,432 @@
+package stagedstreamsync
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/internal/utils"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/rs/zerolog"
+ "golang.org/x/crypto/sha3"
+)
+
+// codeTask represents a single byte code download task, containing a set of
+// peers already attempted retrieval from to detect stalled syncs and abort.
+type codeTask struct {
+ attempts map[sttypes.StreamID]int
+}
+
+// trieTask represents a single trie node download task, containing a set of
+// peers already attempted retrieval from to detect stalled syncs and abort.
+type trieTask struct {
+ hash common.Hash
+ path [][]byte
+ attempts map[sttypes.StreamID]int
+}
+
+type task struct {
+ trieTasks map[string]*trieTask // Set of trie node tasks currently queued for retrieval, indexed by path
+ codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval, indexed by hash
+}
+
+func newTask() *task {
+ return &task{
+ trieTasks: make(map[string]*trieTask),
+ codeTasks: make(map[common.Hash]*codeTask),
+ }
+}
+
+func (t *task) addCodeTask(h common.Hash, ct *codeTask) {
+ t.codeTasks[h] = &codeTask{
+ attempts: ct.attempts,
+ }
+}
+
+func (t *task) getCodeTask(h common.Hash) *codeTask {
+ if task, ok := t.codeTasks[h]; ok {
+ return task
+ }
+ return nil
+}
+
+func (t *task) addNewCodeTask(h common.Hash) {
+ t.codeTasks[h] = &codeTask{
+ attempts: make(map[sttypes.StreamID]int),
+ }
+}
+
+func (t *task) deleteCodeTask(hash common.Hash) {
+ if _, ok := t.codeTasks[hash]; ok {
+ delete(t.codeTasks, hash)
+ }
+}
+
+func (t *task) deleteCodeTaskAttempts(h common.Hash, stID sttypes.StreamID) {
+ if task, ok := t.codeTasks[h]; ok {
+ if _, ok := task.attempts[stID]; ok {
+ delete(t.codeTasks[h].attempts, stID)
+ }
+ }
+}
+
+func (t *task) addTrieTask(path string, tt *trieTask) {
+ t.trieTasks[path] = &trieTask{
+ hash: tt.hash,
+ path: tt.path,
+ attempts: tt.attempts,
+ }
+}
+
+func (t *task) getTrieTask(path string) *trieTask {
+ if task, ok := t.trieTasks[path]; ok {
+ return task
+ }
+ return nil
+}
+
+func (t *task) addNewTrieTask(hash common.Hash, path string) {
+ t.trieTasks[path] = &trieTask{
+ hash: hash,
+ path: trie.NewSyncPath([]byte(path)),
+ attempts: make(map[sttypes.StreamID]int),
+ }
+}
+
+func (t *task) deleteTrieTask(path string) {
+ if _, ok := t.trieTasks[path]; ok {
+ delete(t.trieTasks, path)
+ }
+}
+
+func (t *task) deleteTrieTaskAttempts(path string, stID sttypes.StreamID) {
+ if task, ok := t.trieTasks[path]; ok {
+ if _, ok := task.attempts[stID]; ok {
+ delete(t.trieTasks[path].attempts, stID)
+ }
+ }
+}
+
+// StateDownloadManager is the helper structure for managing state download requests
+type StateDownloadManager struct {
+ bc core.BlockChain
+ tx kv.RwTx
+
+ protocol syncProtocol
+ root common.Hash // State root currently being synced
+ sched *trie.Sync // State trie sync scheduler defining the tasks
+ keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
+ concurrency int
+ logger zerolog.Logger
+ lock sync.Mutex
+
+ numUncommitted int
+ bytesUncommitted int
+
+ tasks *task
+ requesting *task
+ processing *task
+ retries *task
+}
+
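+// newStateDownloadManager creates a state download manager backed by the given
+// chain, transaction and concurrency settings.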
+func newStateDownloadManager(tx kv.RwTx,
+ bc core.BlockChain,
+ concurrency int,
+ logger zerolog.Logger) *StateDownloadManager {
+
+ return &StateDownloadManager{
+ bc: bc,
+ tx: tx,
+ keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ concurrency: concurrency,
+ logger: logger,
+ tasks: newTask(),
+ requesting: newTask(),
+ processing: newTask(),
+ retries: newTask(),
+ }
+}
+
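+// setRootHash sets the state root currently being synced and creates the trie
+// sync scheduler for it.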
+func (s *StateDownloadManager) setRootHash(root common.Hash) {
+ s.root = root
+ s.sched = state.NewStateSync(root, s.bc.ChainDb(), nil, rawdb.HashScheme)
+}
+
+// fillTasks fills the tasks to send to the remote peer.
+func (s *StateDownloadManager) fillTasks(n int) error {
+ if fill := n - (len(s.tasks.trieTasks) + len(s.tasks.codeTasks)); fill > 0 {
+ paths, hashes, codes := s.sched.Missing(fill)
+ for i, path := range paths {
+ s.tasks.addNewTrieTask(hashes[i], path)
+ }
+ for _, hash := range codes {
+ s.tasks.addNewCodeTask(hash)
+ }
+ }
+ return nil
+}
+
+// GetNextBatch returns objects with a maximum of n state download
+// tasks to send to the remote peer.
+func (s *StateDownloadManager) GetNextBatch() (nodes []common.Hash, paths []string, codes []common.Hash, err error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ cap := StatesPerRequest
+
+ nodes, paths, codes = s.getBatchFromRetries(cap)
+ nItems := len(nodes) + len(codes)
+ cap -= nItems
+
+ if cap > 0 {
+ // Refill available tasks from the scheduler.
+ if s.sched.Pending() == 0 {
+ return
+ }
+
+ if err = s.commit(false); err != nil {
+ return
+ }
+
+ if err = s.fillTasks(cap); err != nil {
+ return
+ }
+ newNodes, newPaths, newCodes := s.getBatchFromUnprocessed(cap)
+ nodes = append(nodes, newNodes...)
+ paths = append(paths, newPaths...)
+ codes = append(codes, newCodes...)
+ }
+ return
+}
+
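+// commit flushes the data accumulated by the trie sync scheduler to the chain
+// database once the uncommitted bytes exceed the ideal batch size, or
+// immediately when force is set.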
+func (s *StateDownloadManager) commit(force bool) error {
+ if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
+ return nil
+ }
+ start := time.Now()
+ b := s.bc.ChainDb().NewBatch()
+ if err := s.sched.Commit(b); err != nil {
+ return err
+ }
+ if err := b.Write(); err != nil {
+ return fmt.Errorf("DB write error: %v", err)
+ }
+ s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
+ s.numUncommitted = 0
+ s.bytesUncommitted = 0
+ return nil
+}
+
+// updateStats bumps the various state sync progress counters and displays a log
+// message for the user to see.
+func (s *StateDownloadManager) updateStats(written, duplicate, unexpected int, duration time.Duration) {
+ // TODO: here it updates the stats for total pending, processed, duplicates and unexpected
+
+	// for now, we just log the current stats
+ if written > 0 || duplicate > 0 || unexpected > 0 {
+ utils.Logger().Info().
+ Int("count", written).
+ Int("duplicate", duplicate).
+ Int("unexpected", unexpected).
+ Msg("Imported new state entries")
+ }
+}
+
+// getBatchFromUnprocessed returns objects with a maximum of n unprocessed state download
+// tasks to send to the remote peer.
+func (s *StateDownloadManager) getBatchFromUnprocessed(n int) (nodes []common.Hash, paths []string, codes []common.Hash) {
+	// Gather the hashes to retrieve, preferring code hashes over trie nodes as
+	// those can be written to disk and forgotten about.
+ nodes = make([]common.Hash, 0, n)
+ paths = make([]string, 0, n)
+ codes = make([]common.Hash, 0, n)
+
+ for hash, t := range s.tasks.codeTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ codes = append(codes, hash)
+ s.requesting.addCodeTask(hash, t)
+ s.tasks.deleteCodeTask(hash)
+ }
+ for path, t := range s.tasks.trieTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ nodes = append(nodes, t.hash)
+ paths = append(paths, path)
+ s.requesting.addTrieTask(path, t)
+ s.tasks.deleteTrieTask(path)
+ }
+ return nodes, paths, codes
+}
+
+// getBatchFromRetries returns up to n state download tasks taken from the retry queue.
+func (s *StateDownloadManager) getBatchFromRetries(n int) ([]common.Hash, []string, []common.Hash) {
+	// Gather the hashes to retry, preferring code hashes over trie nodes as
+	// those can be written to disk and forgotten about.
+ nodes := make([]common.Hash, 0, n)
+ paths := make([]string, 0, n)
+ codes := make([]common.Hash, 0, n)
+
+ for hash, t := range s.retries.codeTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ codes = append(codes, hash)
+ s.requesting.addCodeTask(hash, t)
+ s.retries.deleteCodeTask(hash)
+ }
+ for path, t := range s.retries.trieTasks {
+ // Stop when we've gathered enough requests
+ if len(nodes)+len(codes) == n {
+ break
+ }
+ nodes = append(nodes, t.hash)
+ paths = append(paths, path)
+ s.requesting.addTrieTask(path, t)
+ s.retries.deleteTrieTask(path)
+ }
+ return nodes, paths, codes
+}
+
+// HandleRequestError handles the error result
+func (s *StateDownloadManager) HandleRequestError(codeHashes []common.Hash, triePaths []string, streamID sttypes.StreamID, err error) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // add requested code hashes to retries
+ for _, h := range codeHashes {
+ task := s.requesting.getCodeTask(h)
+ s.retries.addCodeTask(h, task)
+ s.requesting.deleteCodeTask(h)
+ }
+
+ // add requested trie paths to retries
+ for _, path := range triePaths {
+ task := s.requesting.getTrieTask(path)
+ s.retries.addTrieTask(path, task)
+ s.requesting.deleteTrieTask(path)
+ }
+}
+
+// HandleRequestResult handles get trie paths and code hashes result
+func (s *StateDownloadManager) HandleRequestResult(codeHashes []common.Hash, triePaths []string, response [][]byte, loopID int, streamID sttypes.StreamID) error {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // Collect processing stats and update progress if valid data was received
+ duplicate, unexpected, successful := 0, 0, 0
+
+ for _, blob := range response {
+ hash, err := s.processNodeData(codeHashes, triePaths, blob)
+ switch err {
+ case nil:
+ s.numUncommitted++
+ s.bytesUncommitted += len(blob)
+ successful++
+ case trie.ErrNotRequested:
+ unexpected++
+ case trie.ErrAlreadyProcessed:
+ duplicate++
+ default:
+ return fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
+ }
+ }
+
+ for _, path := range triePaths {
+ task := s.requesting.getTrieTask(path)
+ if task == nil {
+ // it is already removed from requesting
+ // either it has been completed and deleted by processNodeData or it does not exist
+ continue
+ }
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(response) > 0 { //TODO: if timeout also do same
+ s.requesting.deleteTrieTaskAttempts(path, streamID)
+ } else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ return fmt.Errorf("trie node %s failed with peer %s (%d tries)", task.hash.TerminalString(), streamID, task.attempts[streamID])
+ }
+ // Missing item, place into the retry queue.
+ s.retries.addTrieTask(path, task)
+ s.requesting.deleteTrieTask(path)
+ }
+
+ for _, hash := range codeHashes {
+ task := s.requesting.getCodeTask(hash)
+ if task == nil {
+ // it is already removed from requesting
+ // either it has been completed and deleted by processNodeData or it does not exist
+ continue
+ }
+ // If the node did deliver something, missing items may be due to a protocol
+ // limit or a previous timeout + delayed delivery. Both cases should permit
+ // the node to retry the missing items (to avoid single-peer stalls).
+ if len(response) > 0 { //TODO: if timeout also do same
+ s.requesting.deleteCodeTaskAttempts(hash, streamID) //TODO: do we need delete attempts???
+ } else if task.attempts[streamID] >= MaxTriesToFetchNodeData {
+ // If we've requested the node too many times already, it may be a malicious
+ // sync where nobody has the right data. Abort.
+ return fmt.Errorf("byte code %s failed with peer %s (%d tries)", hash.TerminalString(), streamID, task.attempts[streamID])
+ }
+ // Missing item, place into the retry queue.
+ s.retries.addCodeTask(hash, task)
+ s.requesting.deleteCodeTask(hash)
+ }
+
+ return nil
+}
+
+// processNodeData tries to inject a trie node data blob delivered from a remote
+// peer into the state trie, returning whether anything useful was written or any
+// error occurred.
+//
+// If multiple requests correspond to the same hash, this method will inject the
+// blob as a result for the first one only, leaving the remaining duplicates to
+// be fetched again.
+func (s *StateDownloadManager) processNodeData(codeHashes []common.Hash, triePaths []string, responseData []byte) (common.Hash, error) {
+ var hash common.Hash
+ s.keccak.Reset()
+ s.keccak.Write(responseData)
+ s.keccak.Read(hash[:])
+
+ //TODO: remove from requesting
+ if _, present := s.requesting.codeTasks[hash]; present {
+ err := s.sched.ProcessCode(trie.CodeSyncResult{
+ Hash: hash,
+ Data: responseData,
+ })
+ s.requesting.deleteCodeTask(hash)
+ return hash, err
+ }
+ for _, path := range triePaths {
+ task := s.requesting.getTrieTask(path)
+ if task == nil {
+			// this shouldn't happen since the path comes from triePaths, and triePaths
+			// are taken from the requesting queue
+ continue
+ }
+ if task.hash == hash {
+ err := s.sched.ProcessNode(trie.NodeSyncResult{
+ Path: path,
+ Data: responseData,
+ })
+ s.requesting.deleteTrieTask(path)
+ return hash, err
+ }
+ }
+ return common.Hash{}, trie.ErrNotRequested
+}
diff --git a/api/service/stagedstreamsync/state_sync_full.go b/api/service/stagedstreamsync/state_sync_full.go
new file mode 100644
index 0000000000..14cdb1f594
--- /dev/null
+++ b/api/service/stagedstreamsync/state_sync_full.go
@@ -0,0 +1,2439 @@
+package stagedstreamsync
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ gomath "math"
+ "math/big"
+ "math/rand"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie"
+
+ //"github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/harmony-one/harmony/common/math"
+ "github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/internal/utils"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
+ "github.com/ledgerwatch/erigon-lib/kv"
+ "github.com/ledgerwatch/log/v3"
+ "github.com/pkg/errors"
+ "github.com/rs/zerolog"
+ "golang.org/x/crypto/sha3"
+ // "github.com/ethereum/go-ethereum/eth/protocols/snap/range"
+)
+
+const (
+ // minRequestSize is the minimum number of bytes to request from a remote peer.
+ // This number is used as the low cap for account and storage range requests.
+ // Bytecode and trienode are limited inherently by item count (1).
+ minRequestSize = 64 * 1024
+
+ // maxRequestSize is the maximum number of bytes to request from a remote peer.
+ // This number is used as the high cap for account and storage range requests.
+ // Bytecode and trienode are limited more explicitly by the caps below.
+ maxRequestSize = 512 * 1024
+
+ // maxCodeRequestCount is the maximum number of bytecode blobs to request in a
+ // single query. If this number is too low, we're not filling responses fully
+ // and waste round trip times. If it's too high, we're capping responses and
+ // waste bandwidth.
+ //
+ // Deployed bytecodes are currently capped at 24KB, so the minimum request
+ // size should be maxRequestSize / 24K. Assuming that most contracts do not
+ // come close to that, requesting 4x should be a good approximation.
+ maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
+
+ // maxTrieRequestCount is the maximum number of trie node blobs to request in
+ // a single query. If this number is too low, we're not filling responses fully
+ // and waste round trip times. If it's too high, we're capping responses and
+ // waste bandwidth.
+ maxTrieRequestCount = maxRequestSize / 512
+
+ // trienodeHealRateMeasurementImpact is the impact a single measurement has on
+ // the local node's trienode processing capacity. A value closer to 0 reacts
+ // slower to sudden changes, but it is also more stable against temporary hiccups.
+ trienodeHealRateMeasurementImpact = 0.005
+
+ // minTrienodeHealThrottle is the minimum divisor for throttling trie node
+ // heal requests to avoid overloading the local node and excessively expanding
+ // the state trie breadth wise.
+ minTrienodeHealThrottle = 1
+
+ // maxTrienodeHealThrottle is the maximum divisor for throttling trie node
+	// heal requests to avoid overloading the local node and excessively expanding
+	// the state trie breadth wise.
+ maxTrienodeHealThrottle = maxTrieRequestCount
+
+ // trienodeHealThrottleIncrease is the multiplier for the throttle when the
+ // rate of arriving data is higher than the rate of processing it.
+ trienodeHealThrottleIncrease = 1.33
+
+ // trienodeHealThrottleDecrease is the divisor for the throttle when the
+ // rate of arriving data is lower than the rate of processing it.
+ trienodeHealThrottleDecrease = 1.25
+)
+
+// TrieNodePathSet is a list of trie node paths to retrieve. The first element
+// is a path in the account trie and any following elements are paths in the
+// storage trie of that account. To address an account node, the set consists
+// of only the account path. There's no need to be able to address both an
+// account node and a storage node in the same request as it cannot happen
+// that a slot is accessed before the account path is fully expanded.
+type TrieNodePathSet [][]byte
+
+var (
+ // accountConcurrency is the number of chunks to split the account trie into
+ // to allow concurrent retrievals.
+ accountConcurrency = 16
+
+	// storageConcurrency is the number of chunks to split a large contract
+ // storage trie into to allow concurrent retrievals.
+ storageConcurrency = 16
+
+ // MaxHash represents the maximum possible hash value.
+ MaxHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
+)
+
+// accountTask represents the sync task for a chunk of the account snapshot.
+type accountTask struct {
+ id uint64 //unique id for account task
+
+ root common.Hash
+ origin common.Hash
+ limit common.Hash
+ cap int
+
+ // These fields get serialized to leveldb on shutdown
+ Next common.Hash // Next account to sync in this interval
+ Last common.Hash // Last account to sync in this interval
+ SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts
+
+ pend int // Number of pending subtasks for this round
+
+ needCode []bool // Flags whether the filling accounts need code retrieval
+ needState []bool // Flags whether the filling accounts need storage retrieval
+	needHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing
+
+ codeTasks map[common.Hash]struct{} // Code hashes that need retrieval
+ stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval
+
+ genBatch ethdb.Batch // Batch used by the node generator
+ genTrie *trie.StackTrie // Node generator from storage slots
+
+ requested bool
+ done bool // Flag whether the task can be removed
+
+ res *accountResponse
+}
+
+// accountResponse is an already Merkle-verified remote response to an account
+// range request. It contains the subtrie for the requested account range and
+// the database that's going to be filled with the internal nodes on commit.
+type accountResponse struct {
+ task *accountTask // Task which this request is filling
+ hashes []common.Hash // Account hashes in the returned range
+ accounts []*types.StateAccount // Expanded accounts in the returned range
+ cont bool // Whether the account range has a continuation
+}
+
+// storageTask represents the sync task for a chunk of the storage snapshot.
+type storageTask struct {
+ Next common.Hash // Next account to sync in this interval
+ Last common.Hash // Last account to sync in this interval
+ root common.Hash // Storage root hash for this instance
+ genBatch ethdb.Batch // Batch used by the node generator
+ genTrie *trie.StackTrie // Node generator from storage slots
+ requested bool
+ done bool // Flag whether the task can be removed
+}
+
+// healRequestSort implements the Sort interface, allowing sorting trienode
+// heal requests, which is a prerequisite for merging storage-requests.
+type healRequestSort struct {
+ paths []string
+ hashes []common.Hash
+ syncPaths []trie.SyncPath
+}
+
+func (t *healRequestSort) Len() int {
+ return len(t.hashes)
+}
+
+func (t *healRequestSort) Less(i, j int) bool {
+ a := t.syncPaths[i]
+ b := t.syncPaths[j]
+ switch bytes.Compare(a[0], b[0]) {
+ case -1:
+ return true
+ case 1:
+ return false
+ }
+ // identical first part
+ if len(a) < len(b) {
+ return true
+ }
+ if len(b) < len(a) {
+ return false
+ }
+ if len(a) == 2 {
+ return bytes.Compare(a[1], b[1]) < 0
+ }
+ return false
+}
+
+func (t *healRequestSort) Swap(i, j int) {
+ t.paths[i], t.paths[j] = t.paths[j], t.paths[i]
+ t.hashes[i], t.hashes[j] = t.hashes[j], t.hashes[i]
+ t.syncPaths[i], t.syncPaths[j] = t.syncPaths[j], t.syncPaths[i]
+}
+
+// Merge merges the pathsets, so that several storage requests concerning the
+// same account are merged into one, to reduce bandwidth.
+// This operation is moot if t has not first been sorted.
+func (t *healRequestSort) Merge() []*message.TrieNodePathSet {
+ var result []TrieNodePathSet
+ for _, path := range t.syncPaths {
+ pathset := TrieNodePathSet(path)
+ if len(path) == 1 {
+ // It's an account reference.
+ result = append(result, pathset)
+ } else {
+ // It's a storage reference.
+ end := len(result) - 1
+ if len(result) == 0 || !bytes.Equal(pathset[0], result[end][0]) {
+ // The account doesn't match last, create a new entry.
+ result = append(result, pathset)
+ } else {
+ // It's the same account as the previous one, add to the storage
+ // paths of that request.
+ result[end] = append(result[end], pathset[1])
+ }
+ }
+ }
+	// Convert the merged path sets into a slice of pointers for the wire format.
+	resultPtr := make([]*message.TrieNodePathSet, 0, len(result))
+	for _, p := range result {
+		resultPtr = append(resultPtr, &message.TrieNodePathSet{
+			Pathset: p,
+		})
+	}
+	return resultPtr
+}
+
+type byteCodeTasksBundle struct {
+	id uint64 // Unique id for the bytecode task bundle
+ task *accountTask
+ hashes []common.Hash
+ cap int
+}
+
+type storageTaskBundle struct {
+	id uint64 // Unique id for the storage task bundle
+ root common.Hash
+ accounts []common.Hash
+ roots []common.Hash
+ mainTask *accountTask
+ subtask *storageTask
+ origin common.Hash
+ limit common.Hash
+ cap int
+}
+
+// healTask represents the sync task for healing the snap-synced chunk boundaries.
+type healTask struct {
+ id uint64
+ trieTasks map[string]common.Hash // Set of trie node tasks currently queued for retrieval, indexed by node path
+ codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval, indexed by code hash
+ paths []string
+ hashes []common.Hash
+ pathsets []*message.TrieNodePathSet
+ task *healTask
+ root common.Hash
+ bytes int
+ byteCodeReq bool
+}
+
+type tasks struct {
+ accountTasks map[uint64]*accountTask // Current account task set being synced
+	storageTasks map[uint64]*storageTaskBundle   // Set of storage task bundles currently queued for retrieval, indexed by bundle id
+	codeTasks    map[uint64]*byteCodeTasksBundle // Set of bytecode task bundles currently queued for retrieval, indexed by bundle id
+	healer       map[uint64]*healTask            // Set of heal tasks currently queued for retrieval, indexed by task id
+}
+
+func newTasks() *tasks {
+ return &tasks{
+		accountTasks: make(map[uint64]*accountTask),
+		storageTasks: make(map[uint64]*storageTaskBundle),
+		codeTasks:    make(map[uint64]*byteCodeTasksBundle),
+		healer:       make(map[uint64]*healTask),
+ }
+}
+
+func (t *tasks) addAccountTask(accountTaskIndex uint64, ct *accountTask) {
+ t.accountTasks[accountTaskIndex] = ct
+}
+
+func (t *tasks) getAccountTask(accountTaskIndex uint64) *accountTask {
+ if _, ok := t.accountTasks[accountTaskIndex]; ok {
+ return t.accountTasks[accountTaskIndex]
+ }
+ return nil
+}
+
+func (t *tasks) deleteAccountTask(accountTaskIndex uint64) {
+ if _, ok := t.accountTasks[accountTaskIndex]; ok {
+ delete(t.accountTasks, accountTaskIndex)
+ }
+}
+
+func (t *tasks) addCodeTask(id uint64, bytecodeTask *byteCodeTasksBundle) {
+ t.codeTasks[id] = bytecodeTask
+}
+
+func (t *tasks) deleteCodeTask(id uint64) {
+ if _, ok := t.codeTasks[id]; ok {
+ delete(t.codeTasks, id)
+ }
+}
+
+func (t *tasks) addStorageTaskBundle(storageBundleIndex uint64, storages *storageTaskBundle) {
+ t.storageTasks[storageBundleIndex] = storages
+}
+
+func (t *tasks) deleteStorageTaskBundle(storageBundleIndex uint64) {
+ if _, ok := t.storageTasks[storageBundleIndex]; ok {
+ delete(t.storageTasks, storageBundleIndex)
+ }
+}
+
+func (t *tasks) addHealerTask(taskID uint64, task *healTask) {
+ t.healer[taskID] = task
+}
+
+func (t *tasks) deleteHealerTask(taskID uint64) {
+ if _, ok := t.healer[taskID]; ok {
+ delete(t.healer, taskID)
+ }
+}
+
+func (t *tasks) addHealerTrieTask(taskID uint64, path string, h common.Hash) {
+ if _, ok := t.healer[taskID]; ok {
+ t.healer[taskID].trieTasks[path] = h
+ }
+}
+
+func (t *tasks) getHealerTrieTask(taskID uint64, path string) common.Hash {
+ if _, ok := t.healer[taskID]; ok {
+ return t.healer[taskID].trieTasks[path]
+ }
+ return common.Hash{}
+}
+
+func (t *tasks) addHealerTrieCodeTask(taskID uint64, hash common.Hash, v struct{}) {
+ if _, ok := t.healer[taskID]; ok {
+ t.healer[taskID].codeTasks[hash] = v
+ }
+}
+
+func (t *tasks) getHealerTrieCodeTask(taskID uint64, h common.Hash) struct{} {
+ if _, ok := t.healer[taskID]; ok {
+ return t.healer[taskID].codeTasks[h]
+ }
+ return struct{}{}
+}
+
+// SyncProgress is a database entry to allow suspending and resuming a snapshot state
+// sync. As opposed to full and fast sync, there is no way to restart a suspended
+// snap sync without prior knowledge of the suspension point.
+type SyncProgress struct {
+ Tasks map[uint64]*accountTask // The suspended account tasks (contract tasks within)
+
+ // Status report during syncing phase
+ AccountSynced uint64 // Number of accounts downloaded
+ AccountBytes common.StorageSize // Number of account trie bytes persisted to disk
+ BytecodeSynced uint64 // Number of bytecodes downloaded
+ BytecodeBytes common.StorageSize // Number of bytecode bytes downloaded
+ StorageSynced uint64 // Number of storage slots downloaded
+ StorageBytes common.StorageSize // Number of storage trie bytes persisted to disk
+
+ // Status report during healing phase
+ TrienodeHealSynced uint64 // Number of state trie nodes downloaded
+ TrienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk
+ BytecodeHealSynced uint64 // Number of bytecodes downloaded
+ BytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
+}
+
+// FullStateDownloadManager is the helper structure that manages full state download requests
+type FullStateDownloadManager struct {
+ bc core.BlockChain
+ tx kv.RwTx
+
+ db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
+ scheme string // Node scheme used in node database
+
+ tasks *tasks
+ requesting *tasks
+ processing *tasks
+ retries *tasks
+
+ root common.Hash // Current state trie root being synced
+ snapped bool // Flag to signal that snap phase is done
+
+ protocol syncProtocol
+ scheduler *trie.Sync // State trie sync scheduler defining the tasks
+ keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with
+ concurrency int
+ logger zerolog.Logger
+ lock sync.RWMutex
+
+ numUncommitted int
+ bytesUncommitted int
+
+ accountSynced uint64 // Number of accounts downloaded
+ accountBytes common.StorageSize // Number of account trie bytes persisted to disk
+ bytecodeSynced uint64 // Number of bytecodes downloaded
+ bytecodeBytes common.StorageSize // Number of bytecode bytes downloaded
+ storageSynced uint64 // Number of storage slots downloaded
+ storageBytes common.StorageSize // Number of storage trie bytes persisted to disk
+
+ stateWriter ethdb.Batch // Shared batch writer used for persisting raw states
+ accountHealed uint64 // Number of accounts downloaded during the healing stage
+ accountHealedBytes common.StorageSize // Number of raw account bytes persisted to disk during the healing stage
+ storageHealed uint64 // Number of storage slots downloaded during the healing stage
+ storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
+
+ trienodeHealRate float64 // Average heal rate for processing trie node data
+ trienodeHealPend atomic.Uint64 // Number of trie nodes currently pending for processing
+ trienodeHealThrottle float64 // Divisor for throttling the amount of trienode heal data requested
+ trienodeHealThrottled time.Time // Timestamp the last time the throttle was updated
+
+ trienodeHealSynced uint64 // Number of state trie nodes downloaded
+ trienodeHealBytes common.StorageSize // Number of state trie bytes persisted to disk
+ trienodeHealDups uint64 // Number of state trie nodes already processed
+ trienodeHealNops uint64 // Number of state trie nodes not requested
+ bytecodeHealSynced uint64 // Number of bytecodes downloaded
+ bytecodeHealBytes common.StorageSize // Number of bytecodes persisted to disk
+ bytecodeHealDups uint64 // Number of bytecodes already processed
+ bytecodeHealNops uint64 // Number of bytecodes not requested
+
+ startTime time.Time // Time instance when snapshot sync started
+ logTime time.Time // Time instance when status was last reported
+}
+
+func newFullStateDownloadManager(db ethdb.KeyValueStore,
+ scheme string,
+ tx kv.RwTx,
+ bc core.BlockChain,
+ concurrency int,
+ logger zerolog.Logger) *FullStateDownloadManager {
+
+ return &FullStateDownloadManager{
+ db: db,
+ scheme: scheme,
+ bc: bc,
+ stateWriter: db.NewBatch(),
+ tx: tx,
+ keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState),
+ concurrency: concurrency,
+ logger: logger,
+ tasks: newTasks(),
+ requesting: newTasks(),
+ processing: newTasks(),
+ retries: newTasks(),
+ trienodeHealThrottle: maxTrienodeHealThrottle, // Tune downward instead of insta-filling with junk
+ }
+}
+
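+// setRootHash sets the state root to be synced, rebuilds the trie sync
+// scheduler for it and reloads any previously saved sync progress.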
+func (s *FullStateDownloadManager) setRootHash(root common.Hash) {
+ s.root = root
+ s.scheduler = state.NewStateSync(root, s.db, s.onHealState, s.scheme)
+ s.loadSyncStatus()
+}
+
+func (s *FullStateDownloadManager) taskDone(taskID uint64) {
+ s.tasks.accountTasks[taskID].done = true
+}
+
+// SlimAccount is a modified version of an Account, where the root is replaced
+// with a byte slice. This format can be used to represent the full consensus
+// format or the slim format, which replaces an empty root and code hash with a
+// nil byte slice.
+type SlimAccount struct {
+ Nonce uint64
+ Balance *big.Int
+ Root []byte // Nil if root equals to types.EmptyRootHash
+ CodeHash []byte // Nil if hash equals to types.EmptyCodeHash
+}
+
+// SlimAccountRLP encodes the state account in 'slim RLP' format.
+func (s *FullStateDownloadManager) SlimAccountRLP(account types.StateAccount) []byte {
+ slim := SlimAccount{
+ Nonce: account.Nonce,
+ Balance: account.Balance,
+ }
+ if account.Root != types.EmptyRootHash {
+ slim.Root = account.Root[:]
+ }
+ if !bytes.Equal(account.CodeHash, types.EmptyCodeHash[:]) {
+ slim.CodeHash = account.CodeHash
+ }
+ data, err := rlp.EncodeToBytes(slim)
+ if err != nil {
+ panic(err)
+ }
+ return data
+}
+
+// FullAccount decodes the data on the 'slim RLP' format and returns
+// the consensus format account.
+func FullAccount(data []byte) (*types.StateAccount, error) {
+ var slim SlimAccount
+ if err := rlp.DecodeBytes(data, &slim); err != nil {
+ return nil, err
+ }
+ var account types.StateAccount
+ account.Nonce, account.Balance = slim.Nonce, slim.Balance
+
+ // Interpret the storage root and code hash in slim format.
+ if len(slim.Root) == 0 {
+ account.Root = types.EmptyRootHash
+ } else {
+ account.Root = common.BytesToHash(slim.Root)
+ }
+ if len(slim.CodeHash) == 0 {
+ account.CodeHash = types.EmptyCodeHash[:]
+ } else {
+ account.CodeHash = slim.CodeHash
+ }
+ return &account, nil
+}
+
+// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format.
+func FullAccountRLP(data []byte) ([]byte, error) {
+ account, err := FullAccount(data)
+ if err != nil {
+ return nil, err
+ }
+ return rlp.EncodeToBytes(account)
+}
+
+func (s *FullStateDownloadManager) commitHealer(force bool) {
+ if !force && s.scheduler.MemSize() < ethdb.IdealBatchSize {
+ return
+ }
+ batch := s.db.NewBatch()
+ if err := s.scheduler.Commit(batch); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to commit healing data")
+ }
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to persist healing data", "err", err)
+ }
+ utils.Logger().Debug().Str("type", "trienodes").Interface("bytes", common.StorageSize(batch.ValueSize())).Msg("Persisted set of healing data")
+}
+
+func (s *FullStateDownloadManager) SyncStarted() {
+ if s.startTime == (time.Time{}) {
+ s.startTime = time.Now()
+ }
+}
+
+func (s *FullStateDownloadManager) SyncCompleted() {
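+	// Note: the deferred blocks below run in LIFO order, so the healing data is
+	// committed and the raw-state writer flushed before the remaining account
+	// tasks are forwarded and the final sync status is persisted.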
+ defer func() { // Persist any progress, independent of failure
+ for _, task := range s.tasks.accountTasks {
+ s.forwardAccountTask(task)
+ }
+ s.cleanAccountTasks()
+ s.saveSyncStatus()
+ }()
+
+ // Flush out the last committed raw states
+ defer func() {
+ if s.stateWriter.ValueSize() > 0 {
+ s.stateWriter.Write()
+ s.stateWriter.Reset()
+ }
+ }()
+
+ // commit any trie- and bytecode-healing data.
+ defer s.commitHealer(true)
+
+ // Whether sync completed or not, disregard any future packets
+ defer func() {
+ utils.Logger().Debug().Interface("root", s.root).Msg("Terminating snapshot sync cycle")
+ }()
+
+ elapsed := time.Since(s.startTime)
+ utils.Logger().Debug().Interface("elapsed", elapsed).Msg("Snapshot sync already completed")
+}
+
+// GetNextBatch returns the next batch of state download tasks (account ranges,
+// bytecodes, storage ranges and heal tasks) to send to a remote peer.
+func (s *FullStateDownloadManager) GetNextBatch() (accounts []*accountTask,
+ codes []*byteCodeTasksBundle,
+ storages *storageTaskBundle,
+ healtask *healTask,
+ codetask *healTask,
+ nItems int,
+ err error) {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
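+	// Serve previously failed (retry) tasks first; only pull fresh work from the
+	// scheduler once the retry queue is empty.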
+ accounts, codes, storages, healtask, codetask, nItems = s.getBatchFromRetries()
+
+ if nItems > 0 {
+ return
+ }
+
+ if len(s.tasks.accountTasks) == 0 && s.scheduler.Pending() == 0 {
+ s.SyncCompleted()
+ return
+ }
+
+ // Refill available tasks from the scheduler.
+ newAccounts, newCodes, newStorageTaskBundle, newHealTask, newCodeTask, nItems := s.getBatchFromUnprocessed()
+ accounts = append(accounts, newAccounts...)
+ codes = append(codes, newCodes...)
+ storages = newStorageTaskBundle
+ healtask = newHealTask
+ codetask = newCodeTask
+
+ return
+}
+
+// saveSyncStatus marshals the remaining sync tasks into leveldb.
+func (s *FullStateDownloadManager) saveSyncStatus() {
+ // Serialize any partial progress to disk before spinning down
+ for _, task := range s.tasks.accountTasks {
+ if err := task.genBatch.Write(); err != nil {
+ utils.Logger().Debug().
+ Err(err).
+ Msg("Failed to persist account slots")
+ }
+ for _, subtasks := range task.SubTasks {
+ for _, subtask := range subtasks {
+ if err := subtask.genBatch.Write(); err != nil {
+ utils.Logger().Debug().
+ Err(err).
+ Msg("Failed to persist storage slots")
+ }
+ }
+ }
+ }
+ // Store the actual progress markers
+ progress := &SyncProgress{
+ Tasks: s.tasks.accountTasks,
+ AccountSynced: s.accountSynced,
+ AccountBytes: s.accountBytes,
+ BytecodeSynced: s.bytecodeSynced,
+ BytecodeBytes: s.bytecodeBytes,
+ StorageSynced: s.storageSynced,
+ StorageBytes: s.storageBytes,
+ TrienodeHealSynced: s.trienodeHealSynced,
+ TrienodeHealBytes: s.trienodeHealBytes,
+ BytecodeHealSynced: s.bytecodeHealSynced,
+ BytecodeHealBytes: s.bytecodeHealBytes,
+ }
+ status, err := json.Marshal(progress)
+ if err != nil {
+ panic(err) // This can only fail during implementation
+ }
+ rawdb.WriteSnapshotSyncStatus(s.db, status)
+}
+
+// loadSyncStatus retrieves a previously aborted sync status from the database,
+// or generates a fresh one if none is available.
+func (s *FullStateDownloadManager) loadSyncStatus() {
+ var progress SyncProgress
+
+ if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {
+ if err := json.Unmarshal(status, &progress); err != nil {
+ utils.Logger().Error().
+ Err(err).
+ Msg("Failed to decode snap sync status")
+ } else {
+ for _, task := range progress.Tasks {
+ utils.Logger().Debug().
+ Interface("from", task.Next).
+ Interface("last", task.Last).
+ Msg("Scheduled account sync task")
+ }
+ s.tasks.accountTasks = progress.Tasks
+ for _, task := range s.tasks.accountTasks {
+ task := task // closure for task.genBatch in the stacktrie writer callback
+
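+				// Wrap the batch in a hook so every write is counted towards the
+				// account byte statistics used for progress reporting.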
+ task.genBatch = ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.accountBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ // options := trie.NewStackTrieOptions()
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(task.genBatch, common.Hash{}, path, hash, blob, s.scheme)
+ }
+ task.genTrie = trie.NewStackTrie(writeFn)
+ for accountHash, subtasks := range task.SubTasks {
+ for _, subtask := range subtasks {
+ subtask := subtask // closure for subtask.genBatch in the stacktrie writer callback
+
+ subtask.genBatch = ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.storageBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ // owner := accountHash // local assignment for stacktrie writer closure
+ writeFn = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(subtask.genBatch, accountHash, path, hash, blob, s.scheme)
+ }
+ subtask.genTrie = trie.NewStackTrie(writeFn)
+ }
+ }
+ }
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.snapped = len(s.tasks.accountTasks) == 0
+
+ s.accountSynced = progress.AccountSynced
+ s.accountBytes = progress.AccountBytes
+ s.bytecodeSynced = progress.BytecodeSynced
+ s.bytecodeBytes = progress.BytecodeBytes
+ s.storageSynced = progress.StorageSynced
+ s.storageBytes = progress.StorageBytes
+
+ s.trienodeHealSynced = progress.TrienodeHealSynced
+ s.trienodeHealBytes = progress.TrienodeHealBytes
+ s.bytecodeHealSynced = progress.BytecodeHealSynced
+ s.bytecodeHealBytes = progress.BytecodeHealBytes
+ return
+ }
+ }
+ // Either we've failed to decode the previous state, or there was none.
+ // Start a fresh sync by chunking up the account range and scheduling
+ // them for retrieval.
+ s.tasks = newTasks()
+ s.accountSynced, s.accountBytes = 0, 0
+ s.bytecodeSynced, s.bytecodeBytes = 0, 0
+ s.storageSynced, s.storageBytes = 0, 0
+ s.trienodeHealSynced, s.trienodeHealBytes = 0, 0
+ s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0
+
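+	// Split the 256-bit account hash space into accountConcurrency equally sized
+	// ranges; each range becomes an independent account sync task below.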
+ var next common.Hash
+ step := new(big.Int).Sub(
+ new(big.Int).Div(
+ new(big.Int).Exp(common.Big2, common.Big256, nil),
+ big.NewInt(int64(accountConcurrency)),
+ ), common.Big1,
+ )
+ for i := 0; i < accountConcurrency; i++ {
+ last := common.BigToHash(new(big.Int).Add(next.Big(), step))
+ if i == accountConcurrency-1 {
+ // Make sure we don't overflow if the step is not a proper divisor
+ last = MaxHash
+ }
+ batch := ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.accountBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ // options := trie.NewStackTrieOptions()
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, common.Hash{}, path, hash, blob, s.scheme)
+ }
+ // create a unique id for task
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.accountTasks[taskID]; ok {
+ continue
+ }
+ break
+ }
+ s.tasks.addAccountTask(taskID, &accountTask{
+ id: taskID,
+ Next: next,
+ Last: last,
+ SubTasks: make(map[common.Hash][]*storageTask),
+ genBatch: batch,
+ genTrie: trie.NewStackTrie(writeFn),
+ })
+ utils.Logger().Debug().
+ Interface("from", next).
+ Interface("last", last).
+ Msg("Created account sync task")
+
+ next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))
+ }
+}
+
+// cleanAccountTasks removes account range retrieval tasks that have already been
+// completed.
+func (s *FullStateDownloadManager) cleanAccountTasks() {
+ // If the sync was already done before, don't even bother
+ if len(s.tasks.accountTasks) == 0 {
+ return
+ }
+ // Sync wasn't finished previously, check for any task that can be finalized
+	for taskID := range s.tasks.accountTasks {
+ if s.tasks.accountTasks[taskID].done {
+ s.tasks.deleteAccountTask(taskID)
+ }
+ }
+	// If everything was just finalized, generate the account trie and start heal
+ if len(s.tasks.accountTasks) == 0 {
+ s.lock.Lock()
+ s.snapped = true
+ s.lock.Unlock()
+
+ // Push the final sync report
+ //s.reportSyncProgress(true)
+ }
+}
+
+// cleanStorageTasks iterates over all the account tasks and storage sub-tasks
+// within, cleaning any that have been completed.
+func (s *FullStateDownloadManager) cleanStorageTasks() {
+ for _, task := range s.tasks.accountTasks {
+ for account, subtasks := range task.SubTasks {
+ // Remove storage range retrieval tasks that completed
+ for j := 0; j < len(subtasks); j++ {
+ if subtasks[j].done {
+ subtasks = append(subtasks[:j], subtasks[j+1:]...)
+ j--
+ }
+ }
+ if len(subtasks) > 0 {
+ task.SubTasks[account] = subtasks
+ continue
+ }
+ // If all storage chunks are done, mark the account as done too
+ for j, hash := range task.res.hashes {
+ if hash == account {
+ task.needState[j] = false
+ }
+ }
+ delete(task.SubTasks, account)
+ task.pend--
+
+ // If this was the last pending task, forward the account task
+ if task.pend == 0 {
+ s.forwardAccountTask(task)
+ }
+ }
+ }
+}
+
+// forwardAccountTask takes a filled account task and persists anything available
+// into the database, after which it forwards the next account marker so that the
+// task's next chunk may be filled.
+func (s *FullStateDownloadManager) forwardAccountTask(task *accountTask) {
+ // Remove any pending delivery
+ res := task.res
+ if res == nil {
+ return // nothing to forward
+ }
+ task.res = nil
+
+	// Persist the received account segments. This flat state may be
+	// outdated during the sync, but it can be fixed later during
+	// snapshot generation.
+ oldAccountBytes := s.accountBytes
+
+ batch := ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.accountBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
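+	// Persist accounts only up to the first one that still needs code or storage;
+	// anything beyond it is left for a later pass once those gaps are filled.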
+ for i, hash := range res.hashes {
+ if task.needCode[i] || task.needState[i] {
+ break
+ }
+ slim := s.SlimAccountRLP(*res.accounts[i])
+ rawdb.WriteAccountSnapshot(batch, hash, slim)
+
+ // If the task is complete, drop it into the stack trie to generate
+ // account trie nodes for it
+ if !task.needHeal[i] {
+ full, err := FullAccountRLP(slim) // TODO(karalabe): Slim parsing can be omitted
+ if err != nil {
+ panic(err) // Really shouldn't ever happen
+ }
+ task.genTrie.Update(hash[:], full)
+ }
+ }
+ // Flush anything written just now and update the stats
+ if err := batch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to persist accounts")
+ }
+ s.accountSynced += uint64(len(res.accounts))
+
+	// Task filling persisted, push the chunk marker forward to the first
+ // account still missing data.
+ for i, hash := range res.hashes {
+ if task.needCode[i] || task.needState[i] {
+ return
+ }
+ task.Next = incHash(hash)
+ }
+ // All accounts marked as complete, track if the entire task is done
+ task.done = !res.cont
+
+ // Stack trie could have generated trie nodes, push them to disk (we need to
+	// flush after finalizing task.done). It's fine even if we crash and lose this
+ // write as it will only cause more data to be downloaded during heal.
+ if task.done {
+ task.genTrie.Commit()
+ }
+ if task.genBatch.ValueSize() > ethdb.IdealBatchSize || task.done {
+ if err := task.genBatch.Write(); err != nil {
+ utils.Logger().Error().Err(err).Msg("Failed to persist stack account")
+ }
+ task.genBatch.Reset()
+ }
+ utils.Logger().Debug().
+ Int("accounts", len(res.accounts)).
+ Float64("bytes", float64(s.accountBytes-oldAccountBytes)).
+ Msg("Persisted range of accounts")
+}
+
+// updateStats bumps the various state sync progress counters and displays a log
+// message for the user to see.
+func (s *FullStateDownloadManager) updateStats(written, duplicate, unexpected int, duration time.Duration) {
+ // TODO: here it updates the stats for total pending, processed, duplicates and unexpected
+
+	// for now, we just log the current stats
+ if written > 0 || duplicate > 0 || unexpected > 0 {
+ utils.Logger().Info().
+ Int("count", written).
+ Int("duplicate", duplicate).
+ Int("unexpected", unexpected).
+ Msg("Imported new state entries")
+ }
+}
+
+// getBatchFromUnprocessed returns a batch of unprocessed state download tasks
+// (account ranges, bytecodes, storage ranges or heal tasks) to send to a remote peer.
+func (s *FullStateDownloadManager) getBatchFromUnprocessed() (
+ accounts []*accountTask,
+ codes []*byteCodeTasksBundle,
+ storages *storageTaskBundle,
+ healtask *healTask,
+ codetask *healTask,
+ count int) {
+
+	// Gather unprocessed account, bytecode, storage and heal tasks to hand to a remote peer.
+ codes = make([]*byteCodeTasksBundle, 0)
+ accounts = make([]*accountTask, 0)
+ count = 0
+
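+	// Tasks are handed out in priority order: account ranges first, then contract
+	// bytecodes, then storage ranges, and finally trie-node and bytecode heal tasks.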
+ for i, task := range s.tasks.accountTasks {
+ // Stop when we've gathered enough requests
+ // if len(accounts) == n {
+ // return
+ // }
+
+ // if already requested
+ if task.requested {
+ continue
+ }
+
+ // create a unique id for healer task
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.accountTasks[taskID]; ok {
+ continue
+ }
+ break
+ }
+
+ task.root = s.root
+ task.origin = task.Next
+ task.limit = task.Last
+ task.cap = maxRequestSize
+ task.requested = true
+ s.tasks.accountTasks[i].requested = true
+ accounts = append(accounts, task)
+ s.requesting.addAccountTask(task.id, task)
+ s.tasks.addAccountTask(task.id, task)
+
+		// one account task is enough for a stream
+ count = len(accounts)
+ return
+ }
+
+ totalHashes := int(0)
+
+ for _, task := range s.tasks.accountTasks {
+ // Skip tasks that are already retrieving (or done with) all codes
+ if len(task.codeTasks) == 0 {
+ continue
+ }
+
+ var hashes []common.Hash
+ for hash := range task.codeTasks {
+ delete(task.codeTasks, hash)
+ hashes = append(hashes, hash)
+ }
+ totalHashes += len(hashes)
+
+ // create a unique id for task bundle
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.codeTasks[taskID]; ok {
+ continue
+ }
+ break
+ }
+
+ bytecodeTask := &byteCodeTasksBundle{
+ id: taskID,
+ hashes: hashes,
+ task: task,
+ cap: maxRequestSize,
+ }
+ codes = append(codes, bytecodeTask)
+
+ s.requesting.addCodeTask(taskID, bytecodeTask)
+ s.tasks.addCodeTask(taskID, bytecodeTask)
+
+ // Stop when we've gathered enough requests
+ if totalHashes >= maxCodeRequestCount {
+ count = totalHashes
+ return
+ }
+ }
+
+	// If we found some bytecodes, they can be assigned to a stream
+ if totalHashes > 0 {
+ count = totalHashes
+ return
+ }
+
+ for accTaskID, task := range s.tasks.accountTasks {
+ // Skip tasks that are already retrieving (or done with) all small states
+ if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {
+ continue
+ }
+
+ cap := maxRequestSize
+ storageSets := cap / 1024
+
+ storages = &storageTaskBundle{
+ accounts: make([]common.Hash, 0, storageSets),
+ roots: make([]common.Hash, 0, storageSets),
+ mainTask: task,
+ }
+
+ // create a unique id for task bundle
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.storageTasks[taskID]; ok {
+ continue
+ }
+ break
+ }
+ storages.id = taskID
+
+ for account, subtasks := range task.SubTasks {
+ // find the first subtask which is not requested yet
+ for i, st := range subtasks {
+ // Skip any subtasks already filling
+ if st.requested {
+ continue
+ }
+ // Found an incomplete storage chunk, schedule it
+ storages.accounts = append(storages.accounts, account)
+ storages.roots = append(storages.roots, st.root)
+ storages.subtask = st
+ s.tasks.accountTasks[accTaskID].SubTasks[account][i].requested = true
+ break // Large contract chunks are downloaded individually
+ }
+ if storages.subtask != nil {
+ break // Large contract chunks are downloaded individually
+ }
+ }
+ if storages.subtask == nil {
+ // No large contract required retrieval, but small ones available
+ for account, root := range task.stateTasks {
+ delete(task.stateTasks, account)
+
+ storages.accounts = append(storages.accounts, account)
+ storages.roots = append(storages.roots, root)
+
+ if len(storages.accounts) >= storageSets {
+ break
+ }
+ }
+ }
+ // If nothing was found, it means this task is actually already fully
+ // retrieving, but large contracts are hard to detect. Skip to the next.
+ if len(storages.accounts) == 0 {
+ continue
+ }
+ if storages.subtask != nil {
+ storages.origin = storages.subtask.Next
+ storages.limit = storages.subtask.Last
+ }
+ storages.root = s.root
+ storages.cap = cap
+ s.tasks.addStorageTaskBundle(taskID, storages)
+ s.requesting.addStorageTaskBundle(taskID, storages)
+ count = len(storages.accounts)
+ return
+ }
+
+	if storages != nil && len(storages.accounts) > 0 {
+ count = len(storages.accounts)
+ return
+ }
+
+ // Sync phase done, run heal phase
+ // Iterate over pending tasks
+ for (len(s.tasks.healer) > 0 && len(s.tasks.healer[0].hashes) > 0) || s.scheduler.Pending() > 0 {
+ // If there are not enough trie tasks queued to fully assign, fill the
+		// queue from the state sync scheduler. The trie sync schedules these
+ // together with bytecodes, so we need to queue them combined.
+
+		// index 0 keeps all tasks; later we split them into multiple batches
+ if len(s.tasks.healer) == 0 {
+ s.tasks.healer[0] = &healTask{
+ trieTasks: make(map[string]common.Hash, 0),
+ codeTasks: make(map[common.Hash]struct{}, 0),
+ }
+ }
+
+ mPaths, mHashes, mCodes := s.scheduler.Missing(maxTrieRequestCount)
+ for i, path := range mPaths {
+ s.tasks.healer[0].trieTasks[path] = mHashes[i]
+ }
+ for _, hash := range mCodes {
+ s.tasks.healer[0].codeTasks[hash] = struct{}{}
+ }
+
+ // If all the heal tasks are bytecodes or already downloading, bail
+ if len(s.tasks.healer[0].trieTasks) == 0 {
+ break
+ }
+ // Generate the network query and send it to the peer
+ // if cap > maxTrieRequestCount {
+ // cap = maxTrieRequestCount
+ // }
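+		// Scale the request size down by the heal throttle: the larger the
+		// throttle divisor, the fewer trie nodes are requested per task.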
+ cap := int(float64(maxTrieRequestCount) / s.trienodeHealThrottle)
+ if cap <= 0 {
+ cap = 1
+ }
+ var (
+ hashes = make([]common.Hash, 0, cap)
+ paths = make([]string, 0, cap)
+ pathsets = make([]*message.TrieNodePathSet, 0, cap)
+ )
+ for path, hash := range s.tasks.healer[0].trieTasks {
+ delete(s.tasks.healer[0].trieTasks, path)
+
+ paths = append(paths, path)
+ hashes = append(hashes, hash)
+ if len(paths) >= cap {
+ break
+ }
+ }
+
+ // Group requests by account hash
+ paths, hashes, _, pathsets = sortByAccountPath(paths, hashes)
+
+ // create a unique id for healer task
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.healer[taskID]; ok {
+ continue
+ }
+ break
+ }
+
+ healtask = &healTask{
+ id: taskID,
+ hashes: hashes,
+ paths: paths,
+ pathsets: pathsets,
+ root: s.root,
+ task: s.tasks.healer[0],
+ bytes: maxRequestSize,
+ byteCodeReq: false,
+ }
+
+ s.tasks.healer[taskID] = healtask
+ s.requesting.addHealerTask(taskID, healtask)
+
+ if len(hashes) > 0 {
+ count = len(hashes)
+ return
+ }
+ }
+
+ // trying to get bytecodes
+ // Iterate over pending tasks and try to find a peer to retrieve with
+ for (len(s.tasks.healer) > 0 && len(s.tasks.healer[0].codeTasks) > 0) || s.scheduler.Pending() > 0 {
+ // If there are not enough trie tasks queued to fully assign, fill the
+		// queue from the state sync scheduler. The trie sync schedules these
+ // together with trie nodes, so we need to queue them combined.
+
+ mPaths, mHashes, mCodes := s.scheduler.Missing(maxTrieRequestCount)
+ for i, path := range mPaths {
+ s.tasks.healer[0].trieTasks[path] = mHashes[i]
+ }
+ for _, hash := range mCodes {
+ s.tasks.healer[0].codeTasks[hash] = struct{}{}
+ }
+
+ // If all the heal tasks are trienodes or already downloading, bail
+ if len(s.tasks.healer[0].codeTasks) == 0 {
+ break
+ }
+ // Task pending retrieval, try to find an idle peer. If no such peer
+ // exists, we probably assigned tasks for all (or they are stateless).
+ // Abort the entire assignment mechanism.
+
+ // Generate the network query and send it to the peer
+ // if cap > maxCodeRequestCount {
+ // cap = maxCodeRequestCount
+ // }
+ cap := maxCodeRequestCount
+ hashes := make([]common.Hash, 0, cap)
+ for hash := range s.tasks.healer[0].codeTasks {
+ delete(s.tasks.healer[0].codeTasks, hash)
+
+ hashes = append(hashes, hash)
+ if len(hashes) >= cap {
+ break
+ }
+ }
+
+ // create a unique id for healer task
+ var taskID uint64
+ for {
+ taskID = uint64(rand.Int63())
+ if taskID == 0 {
+ continue
+ }
+ if _, ok := s.tasks.healer[taskID]; ok {
+ continue
+ }
+ break
+ }
+
+ codetask = &healTask{
+ id: taskID,
+ hashes: hashes,
+ task: s.tasks.healer[0],
+ bytes: maxRequestSize,
+ byteCodeReq: true,
+ }
+ count = len(hashes)
+ s.tasks.healer[taskID] = codetask
+		s.requesting.addHealerTask(taskID, codetask)
+ }
+
+ return
+}
+
+// sortByAccountPath takes hashes and paths, and sorts them. After that, it generates
+// the TrieNodePaths and merges paths which belong to the same account path.
+func sortByAccountPath(paths []string, hashes []common.Hash) ([]string, []common.Hash, []trie.SyncPath, []*message.TrieNodePathSet) {
+ var syncPaths []trie.SyncPath
+ for _, path := range paths {
+ syncPaths = append(syncPaths, trie.NewSyncPath([]byte(path)))
+ }
+ n := &healRequestSort{paths, hashes, syncPaths}
+ sort.Sort(n)
+ pathsets := n.Merge()
+ return n.paths, n.hashes, n.syncPaths, pathsets
+}
+
+// getBatchFromRetries returns a batch of previously failed state download tasks to be retried.
+func (s *FullStateDownloadManager) getBatchFromRetries() (
+ accounts []*accountTask,
+ codes []*byteCodeTasksBundle,
+ storages *storageTaskBundle,
+ healtask *healTask,
+ codetask *healTask,
+ count int) {
+
+	// Gather previously failed tasks so they can be retried against another peer.
+ accounts = make([]*accountTask, 0)
+ codes = make([]*byteCodeTasksBundle, 0)
+
+ for _, task := range s.retries.accountTasks {
+ // Stop when we've gathered enough requests
+ // if len(accounts) == n {
+ // return
+ // }
+ accounts = append(accounts, task)
+ s.requesting.addAccountTask(task.id, task)
+ s.retries.deleteAccountTask(task.id)
+ return
+ }
+
+ if len(accounts) > 0 {
+ count = len(accounts)
+ return
+ }
+
+ for _, code := range s.retries.codeTasks {
+ codes = append(codes, code)
+ s.requesting.addCodeTask(code.id, code)
+ s.retries.deleteCodeTask(code.id)
+ return
+ }
+
+ if len(codes) > 0 {
+ count = len(codes)
+ return
+ }
+
+	// The retry map is keyed by randomly generated bundle ids, so take the first
+	// bundle the iteration yields instead of assuming a fixed key.
+	for _, storageBundle := range s.retries.storageTasks {
+		storages = &storageTaskBundle{
+			id:       storageBundle.id,
+			accounts: storageBundle.accounts,
+			roots:    storageBundle.roots,
+			mainTask: storageBundle.mainTask,
+			subtask:  storageBundle.subtask,
+			limit:    storageBundle.limit,
+			origin:   storageBundle.origin,
+		}
+		s.requesting.addStorageTaskBundle(storages.id, storages)
+		s.retries.deleteStorageTaskBundle(storages.id)
+		count = len(storages.accounts)
+		return
+	}
+
+	if len(s.retries.healer) > 0 {
+
+ for id, task := range s.retries.healer {
+ if !task.byteCodeReq {
+ healtask = &healTask{
+ id: id,
+ hashes: task.hashes,
+ paths: task.paths,
+ pathsets: task.pathsets,
+ root: task.root,
+ task: task.task,
+ byteCodeReq: task.byteCodeReq,
+ }
+ s.requesting.addHealerTask(id, task)
+ s.retries.deleteHealerTask(id)
+ count = len(task.hashes)
+ return
+ }
+ if task.byteCodeReq {
+ codetask = &healTask{
+ id: id,
+ hashes: task.hashes,
+ paths: task.paths,
+ pathsets: task.pathsets,
+ root: task.root,
+ task: task.task,
+ byteCodeReq: task.byteCodeReq,
+ }
+ s.requesting.addHealerTask(id, task)
+ s.retries.deleteHealerTask(id)
+ count = len(task.hashes)
+ return
+ }
+ }
+ }
+
+ count = 0
+ return
+}
+
+// HandleRequestError handles a failed request by moving its tasks from the
+// requesting set back to the retry queue.
+func (s *FullStateDownloadManager) HandleRequestError(accounts []*accountTask,
+ codes []*byteCodeTasksBundle,
+ storages *storageTaskBundle,
+ healtask *healTask,
+ codetask *healTask,
+ streamID sttypes.StreamID, err error) {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+	if len(accounts) > 0 {
+ for _, task := range accounts {
+ s.requesting.deleteAccountTask(task.id)
+ s.retries.addAccountTask(task.id, task)
+ }
+ }
+
+	if len(codes) > 0 {
+ for _, code := range codes {
+ s.requesting.deleteCodeTask(code.id)
+ s.retries.addCodeTask(code.id, code)
+ }
+ }
+
+ if storages != nil {
+		s.requesting.deleteStorageTaskBundle(storages.id)
+		s.retries.addStorageTaskBundle(storages.id, storages)
+ }
+
+ if healtask != nil {
+ s.retries.addHealerTask(healtask.id, healtask)
+ s.requesting.deleteHealerTask(healtask.id)
+ }
+
+ if codetask != nil {
+ s.retries.addHealerTask(codetask.id, codetask)
+ s.requesting.deleteHealerTask(codetask.id)
+ }
+}
+
+// UnpackAccountRanges retrieves the accounts from the range packet and converts from slim
+// wire representation to consensus format. The returned data is RLP encoded
+// since it's expected to be serialized to disk without further interpretation.
+//
+// Note, this method does a round of RLP decoding and re-encoding, so only use it
+// once and cache the results if need be. Ideally discard the packet afterwards
+// to not double the memory use.
+func (s *FullStateDownloadManager) UnpackAccountRanges(retAccounts []*message.AccountData) ([]common.Hash, [][]byte, error) {
+ var (
+ hashes = make([]common.Hash, len(retAccounts))
+ accounts = make([][]byte, len(retAccounts))
+ )
+ for i, acc := range retAccounts {
+ val, err := FullAccountRLP(acc.Body)
+ if err != nil {
+ return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err)
+ }
+ hashes[i] = common.BytesToHash(acc.Hash)
+ accounts[i] = val
+ }
+ return hashes, accounts, nil
+}
+
+// HandleAccountRequestResult handles the result of an account range request
+func (s *FullStateDownloadManager) HandleAccountRequestResult(task *accountTask,
+ retAccounts []*message.AccountData,
+ proof [][]byte,
+ origin []byte,
+ last []byte,
+ loopID int,
+ streamID sttypes.StreamID) error {
+
+ hashes, accounts, err := s.UnpackAccountRanges(retAccounts)
+ if err != nil {
+ return err
+ }
+
+ size := common.StorageSize(len(hashes) * common.HashLength)
+ for _, account := range accounts {
+ size += common.StorageSize(len(account))
+ }
+ for _, node := range proof {
+ size += common.StorageSize(len(node))
+ }
+ utils.Logger().Trace().
+ Int("hashes", len(hashes)).
+ Int("accounts", len(accounts)).
+ Int("proofs", len(proof)).
+ Interface("bytes", size).
+ Msg("Delivering range of accounts")
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For account range queries that means the state being
+ // retrieved was either already pruned remotely, or the peer is not yet
+ // synced to our head.
+ if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
+ utils.Logger().Debug().
+ Interface("root", s.root).
+ Msg("Peer rejected account range request")
+ return nil
+ }
+ root := s.root
+
+ // Reconstruct a partial trie from the response and verify it
+ keys := make([][]byte, len(hashes))
+ for i, key := range hashes {
+ keys[i] = common.CopyBytes(key[:])
+ }
+ nodes := make(ProofList, len(proof))
+ for i, node := range proof {
+ nodes[i] = node
+ }
+ cont, err := trie.VerifyRangeProof(root, origin[:], last[:], keys, accounts, nodes.Set())
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Account range failed proof")
+ // Signal this request as failed, and ready for rescheduling
+ return err
+ }
+ accs := make([]*types.StateAccount, len(accounts))
+ for i, account := range accounts {
+ acc := new(types.StateAccount)
+ if err := rlp.DecodeBytes(account, acc); err != nil {
+ panic(err) // We created these blobs, we must be able to decode them
+ }
+ accs[i] = acc
+ }
+
+ if err := s.processAccountResponse(task, hashes, accs, cont); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// processAccountResponse integrates an already validated account range response
+// into the account tasks.
+func (s *FullStateDownloadManager) processAccountResponse(task *accountTask, // Task which this request is filling
+ hashes []common.Hash, // Account hashes in the returned range
+ accounts []*types.StateAccount, // Expanded accounts in the returned range
+ cont bool, // Whether the account range has a continuation
+) error {
+
+ if _, ok := s.tasks.accountTasks[task.id]; ok {
+ s.tasks.accountTasks[task.id].res = &accountResponse{
+ task: task,
+ hashes: hashes,
+ accounts: accounts,
+ cont: cont,
+ }
+ }
+
+ // Ensure that the response doesn't overflow into the subsequent task
+ last := task.Last.Big()
+ for i, hash := range hashes {
+ // Mark the range complete if the last is already included.
+		// Keep iterating to delete any extra states beyond it.
+ cmp := hash.Big().Cmp(last)
+ if cmp == 0 {
+ cont = false
+ continue
+ }
+ if cmp > 0 {
+ // Chunk overflown, cut off excess
+ hashes = hashes[:i]
+ accounts = accounts[:i]
+ cont = false // Mark range completed
+ break
+ }
+ }
+ // Iterate over all the accounts and assemble which ones need further sub-
+ // filling before the entire account range can be persisted.
+ task.needCode = make([]bool, len(accounts))
+ task.needState = make([]bool, len(accounts))
+ task.needHeal = make([]bool, len(accounts))
+
+ task.codeTasks = make(map[common.Hash]struct{})
+ task.stateTasks = make(map[common.Hash]common.Hash)
+
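+	// Track which large-contract retrievals get resumed below, so that any
+	// suspended subtasks that were not resumed can be dropped afterwards.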
+ resumed := make(map[common.Hash]struct{})
+
+ task.pend = 0
+ for i, account := range accounts {
+ // Check if the account is a contract with an unknown code
+ if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) {
+ if !rawdb.HasCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)) {
+ task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}
+ task.needCode[i] = true
+ task.pend++
+ }
+ }
+ // Check if the account is a contract with an unknown storage trie
+ if account.Root != types.EmptyRootHash {
+ if !rawdb.HasTrieNode(s.db, hashes[i], nil, account.Root, s.scheme) {
+ // If there was a previous large state retrieval in progress,
+ // don't restart it from scratch. This happens if a sync cycle
+ // is interrupted and resumed later. However, *do* update the
+ // previous root hash.
+ if subtasks, ok := task.SubTasks[hashes[i]]; ok {
+ utils.Logger().Debug().Interface("account", hashes[i]).Interface("root", account.Root).Msg("Resuming large storage retrieval")
+ for _, subtask := range subtasks {
+ subtask.root = account.Root
+ }
+ task.needHeal[i] = true
+ resumed[hashes[i]] = struct{}{}
+ } else {
+ task.stateTasks[hashes[i]] = account.Root
+ }
+ task.needState[i] = true
+ task.pend++
+ }
+ }
+ }
+ // Delete any subtasks that have been aborted but not resumed. This may undo
+ // some progress if a new peer gives us less accounts than an old one, but for
+ // now we have to live with that.
+ for hash := range task.SubTasks {
+ if _, ok := resumed[hash]; !ok {
+ utils.Logger().Debug().Interface("account", hash).Msg("Aborting suspended storage retrieval")
+ delete(task.SubTasks, hash)
+ }
+ }
+ // If the account range contained no contracts, or all have been fully filled
+ // beforehand, short circuit storage filling and forward to the next task
+ if task.pend == 0 {
+ s.forwardAccountTask(task)
+ return nil
+ }
+ // Some accounts are incomplete, leave as is for the storage and contract
+ // task assigners to pick up and fill
+ return nil
+}
+
+// HandleBytecodeRequestResult handles the result of a bytecode request. It is a
+// callback method to invoke when a batch of contract bytecodes is received from
+// a remote peer.
+func (s *FullStateDownloadManager) HandleBytecodeRequestResult(task interface{}, // Task which this request is filling
+ reqHashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+ bytecodes [][]byte, // Actual bytecodes to store into the database (nil = missing)
+ loopID int,
+ streamID sttypes.StreamID) error {
+
+ s.lock.RLock()
+ syncing := !s.snapped
+ s.lock.RUnlock()
+
+ if syncing {
+ return s.onByteCodes(task.(*accountTask), bytecodes, reqHashes)
+ }
+ return s.onHealByteCodes(task.(*healTask), reqHashes, bytecodes)
+}
+
+// onByteCodes is a callback method to invoke when a batch of contract
+// bytecodes is received from a remote peer in the syncing phase.
+func (s *FullStateDownloadManager) onByteCodes(task *accountTask, bytecodes [][]byte, reqHashes []common.Hash) error {
+ var size common.StorageSize
+ for _, code := range bytecodes {
+ size += common.StorageSize(len(code))
+ }
+
+ utils.Logger().Trace().Int("bytecodes", len(bytecodes)).Interface("bytes", size).Msg("Delivering set of bytecodes")
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For bytecode range queries that means the peer is not
+ // yet synced.
+ if len(bytecodes) == 0 {
+ utils.Logger().Debug().Msg("Peer rejected bytecode request")
+ return nil
+ }
+
+ // Cross reference the requested bytecodes with the response to find gaps
+ // that the serving node is missing
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash := make([]byte, 32)
+
+ codes := make([][]byte, len(reqHashes))
+ for i, j := 0, 0; i < len(bytecodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+
+ for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+ j++
+ }
+ if j < len(reqHashes) {
+ codes[j] = bytecodes[i]
+ j++
+ continue
+ }
+		// We've either run out of hashes, or got unrequested data
+ utils.Logger().Warn().Int("count", len(bytecodes)-i).Msg("Unexpected bytecodes")
+ // Signal this request as failed, and ready for rescheduling
+ return errors.New("unexpected bytecode")
+ }
+ // Response validated, send it to the scheduler for filling
+ if err := s.processBytecodeResponse(task, reqHashes, codes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// processBytecodeResponse integrates an already validated bytecode response
+// into the account tasks.
+func (s *FullStateDownloadManager) processBytecodeResponse(task *accountTask, // Task which this request is filling
+ hashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+ bytecodes [][]byte, // Actual bytecodes to store into the database (nil = missing)
+) error {
+ batch := s.db.NewBatch()
+
+ var (
+ codes uint64
+ )
+ for i, hash := range hashes {
+ code := bytecodes[i]
+
+ // If the bytecode was not delivered, reschedule it
+ if code == nil {
+ task.codeTasks[hash] = struct{}{}
+ continue
+ }
+ // Code was delivered, mark it not needed any more
+ for j, account := range task.res.accounts {
+ if task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {
+ task.needCode[j] = false
+ task.pend--
+ }
+ }
+ // Push the bytecode into a database batch
+ codes++
+ rawdb.WriteCode(batch, hash, code)
+ }
+ bytes := common.StorageSize(batch.ValueSize())
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to persist bytecodes", "err", err)
+ }
+ s.bytecodeSynced += codes
+ s.bytecodeBytes += bytes
+
+ utils.Logger().Debug().Interface("count", codes).Float64("bytes", float64(bytes)).Msg("Persisted set of bytecodes")
+
+ // If this delivery completed the last pending task, forward the account task
+ // to the next chunk
+ if task.pend == 0 {
+ s.forwardAccountTask(task)
+ return nil
+ }
+ // Some accounts are still incomplete, leave as is for the storage and contract
+ // task assigners to pick up and fill.
+
+ return nil
+}
+
+// estimateRemainingSlots tries to determine roughly how many slots are left in
+// a contract storage, based on the number of keys and the last hash. This method
+// assumes that the hashes are lexicographically ordered and evenly distributed.
+func estimateRemainingSlots(hashes int, last common.Hash) (uint64, error) {
+ if last == (common.Hash{}) {
+ return 0, errors.New("last hash empty")
+ }
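+	// Assuming evenly distributed keys, last/2^256 approximates the fraction of
+	// the key space covered so far, so hashes * 2^256 / last estimates the total
+	// number of slots; subtract the ones already retrieved.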
+ space := new(big.Int).Mul(math.MaxBig256, big.NewInt(int64(hashes)))
+ space.Div(space, last.Big())
+ if !space.IsUint64() {
+ // Gigantic address space probably due to too few or malicious slots
+ return 0, errors.New("too few slots for estimation")
+ }
+ return space.Uint64() - uint64(hashes), nil
+}
+
+// UnpackStorages retrieves the storage slots from the range packet and returns them in
+// a split flat format that's more consistent with the internal data structures.
+func (s *FullStateDownloadManager) UnpackStorages(slots [][]*message.StorageData) ([][]common.Hash, [][][]byte) {
+ var (
+ hashset = make([][]common.Hash, len(slots))
+ slotset = make([][][]byte, len(slots))
+ )
+ for i, slots := range slots {
+ hashset[i] = make([]common.Hash, len(slots))
+ slotset[i] = make([][]byte, len(slots))
+ for j, slot := range slots {
+ hashset[i][j] = common.BytesToHash(slot.Hash)
+ slotset[i][j] = slot.Body
+ }
+ }
+ return hashset, slotset
+}
+
+// HandleStorageRequestResult handles the result of a storage request, invoked
+// when ranges of storage slots are received from a remote peer.
+func (s *FullStateDownloadManager) HandleStorageRequestResult(mainTask *accountTask,
+ subTask *storageTask,
+ reqAccounts []common.Hash,
+ roots []common.Hash,
+ origin common.Hash,
+ limit common.Hash,
+ receivedSlots [][]*message.StorageData,
+ proof [][]byte,
+ loopID int,
+ streamID sttypes.StreamID) error {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ hashes, slots := s.UnpackStorages(receivedSlots)
+
+ // Gather some trace stats to aid in debugging issues
+ var (
+ hashCount int
+ slotCount int
+ size common.StorageSize
+ )
+ for _, hashset := range hashes {
+ size += common.StorageSize(common.HashLength * len(hashset))
+ hashCount += len(hashset)
+ }
+ for _, slotset := range slots {
+ for _, slot := range slotset {
+ size += common.StorageSize(len(slot))
+ }
+ slotCount += len(slotset)
+ }
+ for _, node := range proof {
+ size += common.StorageSize(len(node))
+ }
+
+ utils.Logger().Trace().
+ Int("accounts", len(hashes)).
+ Int("hashes", hashCount).
+ Int("slots", slotCount).
+ Int("proofs", len(proof)).
+ Interface("size", size).
+ Msg("Delivering ranges of storage slots")
+
+ // Reject the response if the hash sets and slot sets don't match, or if the
+ // peer sent more data than requested.
+ if len(hashes) != len(slots) {
+ utils.Logger().Warn().
+ Int("hashset", len(hashes)).
+ Int("slotset", len(slots)).
+ Msg("Hash and slot set size mismatch")
+ return errors.New("hash and slot set size mismatch")
+ }
+ if len(hashes) > len(reqAccounts) {
+ utils.Logger().Warn().
+ Int("hashset", len(hashes)).
+ Int("requested", len(reqAccounts)).
+ Msg("Hash set larger than requested")
+ return errors.New("hash set larger than requested")
+ }
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For storage range queries that means the state being
+ // retrieved was either already pruned remotely, or the peer is not yet
+ // synced to our head.
+ if len(hashes) == 0 && len(proof) == 0 {
+ utils.Logger().Debug().Msg("Peer rejected storage request")
+ return nil
+ }
+
+ // Reconstruct the partial tries from the response and verify them
+ var cont bool
+
+ // If a proof was attached while the response is empty, it indicates that the
+ // requested range specified with 'origin' is empty. Construct an empty state
+ // response locally to finalize the range.
+ if len(hashes) == 0 && len(proof) > 0 {
+ hashes = append(hashes, []common.Hash{})
+ slots = append(slots, [][]byte{})
+ }
+ for i := 0; i < len(hashes); i++ {
+ // Convert the keys and proofs into an internal format
+ keys := make([][]byte, len(hashes[i]))
+ for j, key := range hashes[i] {
+ keys[j] = common.CopyBytes(key[:])
+ }
+ nodes := make(ProofList, 0, len(proof))
+ if i == len(hashes)-1 {
+ for _, node := range proof {
+ nodes = append(nodes, node)
+ }
+ }
+ var err error
+ if len(nodes) == 0 {
+ // No proof has been attached, the response must cover the entire key
+ // space and hash to the origin root.
+ _, err = trie.VerifyRangeProof(roots[i], nil, nil, keys, slots[i], nil)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Storage slots failed proof")
+ return err
+ }
+ } else {
+ // A proof was attached, the response is only partial, check that the
+ // returned data is indeed part of the storage trie
+ proofdb := nodes.Set()
+
+ cont, err = trie.VerifyRangeProof(roots[i], origin[:], limit[:], keys, slots[i], proofdb)
+ if err != nil {
+ utils.Logger().Warn().Err(err).Msg("Storage range failed proof")
+ return err
+ }
+ }
+ }
+
+ if err := s.processStorageResponse(mainTask, subTask, reqAccounts, roots, hashes, slots, cont); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// processStorageResponse integrates an already validated storage response
+// into the account tasks.
+func (s *FullStateDownloadManager) processStorageResponse(mainTask *accountTask, // Task which this response belongs to
+ subTask *storageTask, // Task which this response is filling
+ accounts []common.Hash, // Account hashes requested, may be only partially filled
+ roots []common.Hash, // Storage roots requested, may be only partially filled
+ hashes [][]common.Hash, // Storage slot hashes in the returned range
+ storageSlots [][][]byte, // Storage slot values in the returned range
+ cont bool, // Whether the last storage range has a continuation
+) error {
+ batch := ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.storageBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ var (
+ slots int
+ oldStorageBytes = s.storageBytes
+ )
+ // Iterate over all the accounts and reconstruct their storage tries from the
+ // delivered slots
+ for i, account := range accounts {
+ // If the account was not delivered, reschedule it
+ if i >= len(hashes) {
+ mainTask.stateTasks[account] = roots[i]
+ continue
+ }
+ // State was delivered, if complete mark as not needed any more, otherwise
+ // mark the account as needing healing
+ for j, hash := range mainTask.res.hashes {
+ if account != hash {
+ continue
+ }
+ acc := mainTask.res.accounts[j]
+
+ // If the packet contains multiple contract storage slots, all
+ // but the last are surely complete. The last contract may be
+			// chunked, so check its continuation flag.
+ if subTask == nil && mainTask.needState[j] && (i < len(hashes)-1 || !cont) {
+ mainTask.needState[j] = false
+ mainTask.pend--
+ }
+ // If the last contract was chunked, mark it as needing healing
+ // to avoid writing it out to disk prematurely.
+ if subTask == nil && !mainTask.needHeal[j] && i == len(hashes)-1 && cont {
+ mainTask.needHeal[j] = true
+ }
+ // If the last contract was chunked, we need to switch to large
+ // contract handling mode
+ if subTask == nil && i == len(hashes)-1 && cont {
+ // If we haven't yet started a large-contract retrieval, create
+ // the subtasks for it within the main account task
+ if tasks, ok := mainTask.SubTasks[account]; !ok {
+ var (
+ keys = hashes[i]
+ chunks = uint64(storageConcurrency)
+ lastKey common.Hash
+ )
+ if len(keys) > 0 {
+ lastKey = keys[len(keys)-1]
+ }
+ // If the number of slots remaining is low, decrease the
+ // number of chunks. Somewhere on the order of 10-15K slots
+ // fit into a packet of 500KB. A key/slot pair is maximum 64
+ // bytes, so pessimistically maxRequestSize/64 = 8K.
+ //
+ // Chunk so that at least 2 packets are needed to fill a task.
+ if estimate, err := estimateRemainingSlots(len(keys), lastKey); err == nil {
+ if n := estimate / (2 * (maxRequestSize / 64)); n+1 < chunks {
+ chunks = n + 1
+ }
+ utils.Logger().Debug().
+ Int("initiators", len(keys)).
+ Interface("tail", lastKey).
+ Uint64("remaining", estimate).
+ Uint64("chunks", chunks).
+ Msg("Chunked large contract")
+ } else {
+ utils.Logger().Debug().
+ Int("initiators", len(keys)).
+ Interface("tail", lastKey).
+ Uint64("chunks", chunks).
+ Msg("Chunked large contract")
+ }
+ r := newHashRange(lastKey, chunks)
+
+ // Our first task is the one that was just filled by this response.
+ batch := ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.storageBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ ownerAccount := account // local assignment for stacktrie writer closure
+ // options := trie.NewStackTrieOptions()
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, ownerAccount, path, hash, blob, s.scheme)
+ }
+ tasks = append(tasks, &storageTask{
+ Next: common.Hash{},
+ Last: r.End(),
+ root: acc.Root,
+ genBatch: batch,
+ genTrie: trie.NewStackTrie(writeFn),
+ })
+ for r.Next() {
+ batch := ethdb.HookedBatch{
+ Batch: s.db.NewBatch(),
+ OnPut: func(key []byte, value []byte) {
+ s.storageBytes += common.StorageSize(len(key) + len(value))
+ },
+ }
+ // options := trie.NewStackTrieOptions()
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, ownerAccount, path, hash, blob, s.scheme)
+ }
+ tasks = append(tasks, &storageTask{
+ Next: r.Start(),
+ Last: r.End(),
+ root: acc.Root,
+ genBatch: batch,
+ genTrie: trie.NewStackTrie(writeFn),
+ })
+ }
+ for _, task := range tasks {
+ utils.Logger().Debug().
+ Interface("from", task.Next).
+ Interface("last", task.Last).
+ Interface("root", acc.Root).
+ Interface("account", account).
+ Msg("Created storage sync task")
+ }
+ mainTask.SubTasks[account] = tasks
+
+ // Since we've just created the sub-tasks, this response
+ // is surely for the first one (zero origin)
+ subTask = tasks[0]
+ }
+ }
+ // If we're in large contract delivery mode, forward the subtask
+ if subTask != nil {
+ // Ensure the response doesn't overflow into the subsequent task
+ last := subTask.Last.Big()
+ // Find the first overflowing key. While at it, mark res as complete
+ // if we find the range to include or pass the 'last'
+ index := sort.Search(len(hashes[i]), func(k int) bool {
+ cmp := hashes[i][k].Big().Cmp(last)
+ if cmp >= 0 {
+ cont = false
+ }
+ return cmp > 0
+ })
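+ // index is now the count of keys that fall within this subtask's range; anything at or beyond it belongs to the next chunk.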
+ if index >= 0 {
+ // cut off excess
+ hashes[i] = hashes[i][:index]
+ storageSlots[i] = storageSlots[i][:index]
+ }
+ // Forward the relevant storage chunk (even if created just now)
+ if cont {
+ subTask.Next = incHash(hashes[i][len(hashes[i])-1])
+ } else {
+ subTask.done = true
+ }
+ }
+ }
+ // Iterate over all the complete contracts, reconstruct the trie nodes and
+ // push them to disk. If the contract is chunked, the trie nodes will be
+ // reconstructed later.
+ slots += len(hashes[i])
+
+ if i < len(hashes)-1 || subTask == nil {
+ // no need to make local reassignment of account: this closure does not outlive the loop
+ // options := trie.NewStackTrieOptions()
+ writeFn := func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
+ rawdb.WriteTrieNode(batch, account, path, hash, blob, s.scheme)
+ }
+ tr := trie.NewStackTrie(writeFn)
+ for j := 0; j < len(hashes[i]); j++ {
+ tr.Update(hashes[i][j][:], storageSlots[i][j])
+ }
+ tr.Commit()
+ }
+ // Persist the received storage segments. These flat states may be
+ // outdated during the sync, but they can be fixed later during
+ // snapshot generation.
+ for j := 0; j < len(hashes[i]); j++ {
+ rawdb.WriteStorageSnapshot(batch, account, hashes[i][j], storageSlots[i][j])
+
+ // If we're storing large contracts, generate the trie nodes
+ // on the fly to not trash the gluing points
+ if i == len(hashes)-1 && subTask != nil {
+ subTask.genTrie.Update(hashes[i][j][:], storageSlots[i][j])
+ }
+ }
+ }
+ // Large contracts could have generated new trie nodes, flush them to disk
+ if subTask != nil {
+ if subTask.done {
+ root, _ := subTask.genTrie.Commit()
+ if root == subTask.root {
+ // If the chunk's root is an overflown but full delivery, clear the heal request
+ for i, account := range mainTask.res.hashes {
+ if account == accounts[len(accounts)-1] {
+ mainTask.needHeal[i] = false
+ }
+ }
+ }
+ }
+ if subTask.genBatch.ValueSize() > ethdb.IdealBatchSize || subTask.done {
+ if err := subTask.genBatch.Write(); err != nil {
+ log.Error("Failed to persist stack slots", "err", err)
+ }
+ subTask.genBatch.Reset()
+ }
+ }
+ // Flush anything written just now and update the stats
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to persist storage slots", "err", err)
+ }
+ s.storageSynced += uint64(slots)
+
+ utils.Logger().Debug().
+ Int("accounts", len(hashes)).
+ Int("slots", slots).
+ Interface("bytes", s.storageBytes-oldStorageBytes).
+ Msg("Persisted set of storage slots")
+
+ // If this delivery completed the last pending task, forward the account task
+ // to the next chunk
+ if mainTask.pend == 0 {
+ s.forwardAccountTask(mainTask)
+ return nil
+ }
+ // Some accounts are still incomplete, leave as is for the storage and contract
+ // task assigners to pick up and fill.
+
+ return nil
+}
+
+// HandleTrieNodeHealRequestResult handles the result of a trie node heal request, invoked when
+// a batch of trie nodes is received from a remote peer.
+func (s *FullStateDownloadManager) HandleTrieNodeHealRequestResult(task *healTask, // Task which this request is filling
+ reqPaths []string,
+ reqHashes []common.Hash,
+ trienodes [][]byte,
+ loopID int,
+ streamID sttypes.StreamID) error {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ var size common.StorageSize
+ for _, node := range trienodes {
+ size += common.StorageSize(len(node))
+ }
+
+ utils.Logger().Trace().
+ Int("trienodes", len(trienodes)).
+ Interface("bytes", size).
+ Msg("Delivering set of healing trienodes")
+
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For trie node heal requests that means the peer is not
+ // yet synced.
+ if len(trienodes) == 0 {
+ utils.Logger().Debug().Msg("Peer rejected trienode heal request")
+ return nil
+ }
+
+ // Cross reference the requested trienodes with the response to find gaps
+ // that the serving node is missing
+ var (
+ hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash = make([]byte, 32)
+ nodes = make([][]byte, len(reqHashes))
+ fills uint64
+ )
+ for i, j := 0, 0; i < len(trienodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(trienodes[i])
+ hasher.Read(hash)
+
+ for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+ j++
+ }
+ if j < len(reqHashes) {
+ nodes[j] = trienodes[i]
+ fills++
+ j++
+ continue
+ }
+ // We've either run out of hashes, or got unrequested data
+ utils.Logger().Warn().Int("count", len(trienodes)-i).Msg("Unexpected healing trienodes")
+
+ // Signal this request as failed, and ready for rescheduling
+ return errors.New("unexpected healing trienode")
+ }
+ // Response validated, send it to the scheduler for filling
+ s.trienodeHealPend.Add(fills)
+ defer func() {
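+ // For an unsigned counter, adding ^(fills-1) is two's-complement subtraction of fills, releasing the pending count taken above.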
+ s.trienodeHealPend.Add(^(fills - 1))
+ }()
+
+ if err := s.processTrienodeHealResponse(task, reqPaths, reqHashes, nodes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// processTrienodeHealResponse integrates an already validated trienode response
+// into the healer tasks.
+func (s *FullStateDownloadManager) processTrienodeHealResponse(task *healTask, // Task which this request is filling
+ paths []string, // Paths of the trie nodes
+ hashes []common.Hash, // Hashes of the trie nodes to avoid double hashing
+ nodes [][]byte, // Actual trie nodes to store into the database (nil = missing)
+) error {
+ var (
+ start = time.Now()
+ fills int
+ )
+ for i, hash := range hashes {
+ node := nodes[i]
+
+ // If the trie node was not delivered, reschedule it
+ if node == nil {
+ task.trieTasks[paths[i]] = hashes[i]
+ continue
+ }
+ fills++
+
+ // Push the trie node into the state syncer
+ s.trienodeHealSynced++
+ s.trienodeHealBytes += common.StorageSize(len(node))
+
+ err := s.scheduler.ProcessNode(trie.NodeSyncResult{Path: paths[i], Data: node})
+ switch err {
+ case nil:
+ case trie.ErrAlreadyProcessed:
+ s.trienodeHealDups++
+ case trie.ErrNotRequested:
+ s.trienodeHealNops++
+ default:
+ utils.Logger().Err(err).Interface("hash", hash).Msg("Invalid trienode processed")
+ }
+ }
+ s.commitHealer(false)
+
+ // Calculate the processing rate of one filled trie node
+ rate := float64(fills) / (float64(time.Since(start)) / float64(time.Second))
+
+ // Update the currently measured trienode queueing and processing throughput.
+ //
+ // The processing rate needs to be updated uniformly, independent of whether
+ // we've processed 1x100 trie nodes or 100x1, to keep the rate consistent even in
+ // the face of varying network packets. As such, we cannot just measure the
+ // time it took to process N trie nodes and update once, we need one update
+ // per trie node.
+ //
+ // Naively, that would be one update per fill:
+ //   for i := 0; i < fills; i++ { healRate = (1-MI)*healRate + MI*rate }
+ // which collapses to the closed form below (MI = measurement impact).
+ decay := math.Pow(1-trienodeHealRateMeasurementImpact, float64(fills)) // constant mirrors the upstream snap sync implementation
+ s.trienodeHealRate = decay*s.trienodeHealRate + (1-decay)*rate
+
+ pending := s.trienodeHealPend.Load()
+ if time.Since(s.trienodeHealThrottled) > time.Second {
+ // Periodically adjust the trie node throttler
+ if float64(pending) > 2*s.trienodeHealRate {
+ s.trienodeHealThrottle *= trienodeHealThrottleIncrease
+ } else {
+ s.trienodeHealThrottle /= trienodeHealThrottleDecrease
+ }
+ if s.trienodeHealThrottle > maxTrienodeHealThrottle {
+ s.trienodeHealThrottle = maxTrienodeHealThrottle
+ } else if s.trienodeHealThrottle < minTrienodeHealThrottle {
+ s.trienodeHealThrottle = minTrienodeHealThrottle
+ }
+ s.trienodeHealThrottled = time.Now()
+
+ utils.Logger().Debug().
+ Float64("rate", s.trienodeHealRate).
+ Uint64("pending", pending).
+ Float64("throttle", s.trienodeHealThrottle).
+ Msg("Updated trie node heal throttler")
+ }
+
+ return nil
+}
+
+// HandleByteCodeHealRequestResult handles the result of a bytecode heal request received from a remote peer
+func (s *FullStateDownloadManager) HandleByteCodeHealRequestResult(task *healTask, // Task which this request is filling
+ hashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+ codes [][]byte, // Actual bytecodes to store into the database (nil = missing)
+ loopID int,
+ streamID sttypes.StreamID) error {
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if err := s.processBytecodeHealResponse(task, hashes, codes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// onHealByteCodes is a callback method to invoke when a batch of contract
+// bytecodes is received from a remote peer in the healing phase.
+func (s *FullStateDownloadManager) onHealByteCodes(task *healTask,
+ reqHashes []common.Hash,
+ bytecodes [][]byte) error {
+
+ var size common.StorageSize
+ for _, code := range bytecodes {
+ size += common.StorageSize(len(code))
+ }
+
+ utils.Logger().Trace().
+ Int("bytecodes", len(bytecodes)).
+ Interface("bytes", size).
+ Msg("Delivering set of healing bytecodes")
+
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ // Response is valid, but check if peer is signalling that it does not have
+ // the requested data. For bytecode range queries that means the peer is not
+ // yet synced.
+ if len(bytecodes) == 0 {
+ utils.Logger().Debug().Msg("Peer rejected bytecode heal request")
+ return nil
+ }
+
+ // Cross reference the requested bytecodes with the response to find gaps
+ // that the serving node is missing
+ hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
+ hash := make([]byte, 32)
+
+ codes := make([][]byte, len(reqHashes))
+ for i, j := 0, 0; i < len(bytecodes); i++ {
+ // Find the next hash that we've been served, leaving misses with nils
+ hasher.Reset()
+ hasher.Write(bytecodes[i])
+ hasher.Read(hash)
+
+ for j < len(reqHashes) && !bytes.Equal(hash, reqHashes[j][:]) {
+ j++
+ }
+ if j < len(reqHashes) {
+ codes[j] = bytecodes[i]
+ j++
+ continue
+ }
+ // We've either run out of hashes, or got unrequested data
+ utils.Logger().Warn().Int("count", len(bytecodes)-i).Msg("Unexpected healing bytecodes")
+
+ // Signal this request as failed, and ready for rescheduling
+ return errors.New("unexpected healing bytecode")
+ }
+
+ if err := s.processBytecodeHealResponse(task, reqHashes, codes); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// processBytecodeHealResponse integrates an already validated bytecode response
+// into the healer tasks.
+func (s *FullStateDownloadManager) processBytecodeHealResponse(task *healTask, // Task which this request is filling
+ hashes []common.Hash, // Hashes of the bytecode to avoid double hashing
+ codes [][]byte, // Actual bytecodes to store into the database (nil = missing)
+) error {
+ for i, hash := range hashes {
+ node := codes[i]
+
+ // If the bytecode was not delivered, reschedule it
+ if node == nil {
+ task.codeTasks[hash] = struct{}{}
+ continue
+ }
+ // Push the bytecode into the state syncer
+ s.bytecodeHealSynced++
+ s.bytecodeHealBytes += common.StorageSize(len(node))
+
+ err := s.scheduler.ProcessCode(trie.CodeSyncResult{Hash: hash, Data: node})
+ switch err {
+ case nil:
+ case trie.ErrAlreadyProcessed:
+ s.bytecodeHealDups++
+ case trie.ErrNotRequested:
+ s.bytecodeHealNops++
+ default:
+ log.Error("Invalid bytecode processed", "hash", hash, "err", err)
+ }
+ }
+ s.commitHealer(false)
+
+ return nil
+}
+
+// onHealState is a callback method to invoke when a flat state (account
+// or storage slot) is downloaded during the healing stage. The flat states
+// can be persisted blindly and fixed later in the generation stage.
+// Note it is not concurrency safe; callers must handle synchronization externally.
+func (s *FullStateDownloadManager) onHealState(paths [][]byte, value []byte) error {
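+ // A single path element identifies an account; two elements identify an (account, storage slot) pair.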
+ if len(paths) == 1 {
+ var account types.StateAccount
+ if err := rlp.DecodeBytes(value, &account); err != nil {
+ return nil // Returning the error here would drop the remote peer
+ }
+ blob := s.SlimAccountRLP(account)
+ rawdb.WriteAccountSnapshot(s.stateWriter, common.BytesToHash(paths[0]), blob)
+ s.accountHealed += 1
+ s.accountHealedBytes += common.StorageSize(1 + common.HashLength + len(blob))
+ }
+ if len(paths) == 2 {
+ rawdb.WriteStorageSnapshot(s.stateWriter, common.BytesToHash(paths[0]), common.BytesToHash(paths[1]), value)
+ s.storageHealed += 1
+ s.storageHealedBytes += common.StorageSize(1 + 2*common.HashLength + len(value))
+ }
+ if s.stateWriter.ValueSize() > ethdb.IdealBatchSize {
+ s.stateWriter.Write() // It's fine to ignore the error here
+ s.stateWriter.Reset()
+ }
+ return nil
+}
diff --git a/api/service/stagedstreamsync/syncing.go b/api/service/stagedstreamsync/syncing.go
index 738f2f9203..0db0dd4e2d 100644
--- a/api/service/stagedstreamsync/syncing.go
+++ b/api/service/stagedstreamsync/syncing.go
@@ -11,6 +11,8 @@ import (
"github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core"
+ "github.com/harmony-one/harmony/core/rawdb"
+ "github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/utils"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/ledgerwatch/erigon-lib/kv"
@@ -81,20 +83,30 @@ func CreateStagedSync(ctx context.Context,
return nil, errInitDB
}
+ extractReceiptHashes := config.SyncMode == FastSync || config.SyncMode == SnapSync
stageHeadsCfg := NewStageHeadersCfg(bc, mainDB)
stageShortRangeCfg := NewStageShortRangeCfg(bc, mainDB)
stageSyncEpochCfg := NewStageEpochCfg(bc, mainDB)
- stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress)
+ stageBodiesCfg := NewStageBodiesCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, extractReceiptHashes, config.LogProgress)
stageStatesCfg := NewStageStatesCfg(bc, mainDB, dbs, config.Concurrency, logger, config.LogProgress)
+ stageStateSyncCfg := NewStageStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress)
+ stageFullStateSyncCfg := NewStageFullStateSyncCfg(bc, mainDB, config.Concurrency, protocol, logger, config.LogProgress)
+ stageReceiptsCfg := NewStageReceiptsCfg(bc, mainDB, dbs, config.Concurrency, protocol, isBeaconNode, config.LogProgress)
lastMileCfg := NewStageLastMileCfg(ctx, bc, mainDB)
stageFinishCfg := NewStageFinishCfg(mainDB)
- stages := DefaultStages(ctx,
+ // init stages order based on sync mode
+ initStagesOrder(config.SyncMode)
+
+ defaultStages := DefaultStages(ctx,
stageHeadsCfg,
stageSyncEpochCfg,
stageShortRangeCfg,
stageBodiesCfg,
+ stageStateSyncCfg,
+ stageFullStateSyncCfg,
stageStatesCfg,
+ stageReceiptsCfg,
lastMileCfg,
stageFinishCfg,
)
@@ -112,7 +124,7 @@ func CreateStagedSync(ctx context.Context,
bc,
consensus,
mainDB,
- stages,
+ defaultStages,
isBeaconNode,
protocol,
isBeaconNode,
@@ -214,15 +226,75 @@ func (s *StagedStreamSync) Debug(source string, msg interface{}) {
}
}
+// checkPivot determines the pivot block and returns it along with the sync mode for the current cycle
+func (s *StagedStreamSync) checkPivot(ctx context.Context, estimatedHeight uint64, initSync bool) (*types.Block, SyncMode, error) {
+
+ if s.config.SyncMode == FullSync {
+ return nil, FullSync, nil
+ }
+
+ // do full sync if chain is at early stage
+ if initSync && estimatedHeight < MaxPivotDistanceToHead {
+ return nil, FullSync, nil
+ }
+
+ pivotBlockNumber := uint64(0)
+ var curPivot *uint64
+ if curPivot = rawdb.ReadLastPivotNumber(s.bc.ChainDb()); curPivot != nil {
+ // if head is behind pivot, the node is still in fast/snap sync mode
+ if head := s.CurrentBlockNumber(); head < *curPivot {
+ pivotBlockNumber = *curPivot
+ // pivot could be moved forward if it is far from head
+ if pivotBlockNumber < estimatedHeight-MaxPivotDistanceToHead {
+ pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead
+ }
+ }
+ } else {
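+ // No pivot recorded yet: on a fresh fast sync (head still at genesis), pick an initial pivot MinPivotDistanceToHead blocks behind the estimated height and persist it.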
+ if head := s.CurrentBlockNumber(); s.config.SyncMode == FastSync && head <= 1 {
+ pivotBlockNumber = estimatedHeight - MinPivotDistanceToHead
+ if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil {
+ s.logger.Warn().Err(err).
+ Uint64("new pivot number", pivotBlockNumber).
+ Msg(WrapStagedSyncMsg("update pivot number failed"))
+ }
+ }
+ }
+ if pivotBlockNumber > 0 {
+ if block, err := s.queryAllPeersForBlockByNumber(ctx, pivotBlockNumber); err != nil {
+ s.logger.Error().Err(err).
+ Uint64("pivot", pivotBlockNumber).
+ Msg(WrapStagedSyncMsg("query peers for pivot block failed"))
+ return block, FastSync, err
+ } else {
+ if curPivot == nil || pivotBlockNumber != *curPivot {
+ if err := rawdb.WriteLastPivotNumber(s.bc.ChainDb(), pivotBlockNumber); err != nil {
+ s.logger.Warn().Err(err).
+ Uint64("new pivot number", pivotBlockNumber).
+ Msg(WrapStagedSyncMsg("update pivot number failed"))
+ return block, FastSync, err
+ }
+ }
+ s.status.pivotBlock = block
+ s.logger.Info().
+ Uint64("estimatedHeight", estimatedHeight).
+ Uint64("pivot number", pivotBlockNumber).
+ Msg(WrapStagedSyncMsg("fast/snap sync mode, pivot is set successfully"))
+ return block, FastSync, nil
+ }
+ }
+ return nil, FullSync, nil
+}
+
// doSync does the long range sync.
// One LongRangeSync consists of several iterations.
// For each iteration, estimate the current block number, then fetch block & insert to blockchain
func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bool) (uint64, int, error) {
+ startedNumber := s.bc.CurrentBlock().NumberU64()
+
var totalInserted int
s.initSync = initSync
-
if err := s.checkPrerequisites(); err != nil {
return 0, 0, err
}
@@ -236,20 +308,30 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
//TODO: use directly currentCycle var
s.status.setTargetBN(estimatedHeight)
}
- if curBN := s.bc.CurrentBlock().NumberU64(); estimatedHeight <= curBN {
+ if curBN := s.CurrentBlockNumber(); estimatedHeight <= curBN {
s.logger.Info().Uint64("current number", curBN).Uint64("target number", estimatedHeight).
Msg(WrapStagedSyncMsg("early return of long range sync (chain is already ahead of target height)"))
return estimatedHeight, 0, nil
}
}
+ // We are probably in full sync, but we might have rewound to before the
+ // fast/snap sync pivot; check if we should re-enable it.
+ if pivotBlock, cycleSyncMode, err := s.checkPivot(downloaderContext, estimatedHeight, initSync); err != nil {
+ s.logger.Error().Err(err).Msg(WrapStagedSyncMsg("check pivot failed"))
+ return 0, 0, err
+ } else {
+ s.status.cycleSyncMode = cycleSyncMode
+ s.status.pivotBlock = pivotBlock
+ }
+
s.startSyncing()
defer s.finishSyncing()
for {
ctx, cancel := context.WithCancel(downloaderContext)
- n, err := s.doSyncCycle(ctx, initSync)
+ n, err := s.doSyncCycle(ctx)
if err != nil {
utils.Logger().Error().
Err(err).
@@ -281,11 +363,13 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
Bool("isBeacon", s.isBeacon).
Uint32("shard", s.bc.ShardID()).
Int("blocks", totalInserted).
+ Uint64("startedNumber", startedNumber).
+ Uint64("currentNumber", s.bc.CurrentBlock().NumberU64()).
Msg(WrapStagedSyncMsg("sync cycle blocks inserted successfully"))
}
// add consensus last mile blocks
- if s.consensus != nil {
+ if s.consensus != nil && s.isBeaconNode {
if hashes, err := s.addConsensusLastMile(s.Blockchain(), s.consensus); err != nil {
utils.Logger().Error().Err(err).
Msg("[STAGED_STREAM_SYNC] Add consensus last mile failed")
@@ -304,14 +388,14 @@ func (s *StagedStreamSync) doSync(downloaderContext context.Context, initSync bo
return estimatedHeight, totalInserted, nil
}
-func (s *StagedStreamSync) doSyncCycle(ctx context.Context, initSync bool) (int, error) {
+func (s *StagedStreamSync) doSyncCycle(ctx context.Context) (int, error) {
// TODO: initSync=true means currentCycleNumber==0, so we can remove initSync
var totalInserted int
s.inserted = 0
- startHead := s.bc.CurrentBlock().NumberU64()
+ startHead := s.CurrentBlockNumber()
canRunCycleInOneTransaction := false
var tx kv.RwTx
@@ -375,6 +459,40 @@ func (s *StagedStreamSync) checkPrerequisites() error {
return s.checkHaveEnoughStreams()
}
+func (s *StagedStreamSync) CurrentBlockNumber() uint64 {
+ // if current head is ahead of pivot block, return chain head regardless of sync mode
+ if s.status.pivotBlock != nil && s.bc.CurrentBlock().NumberU64() >= s.status.pivotBlock.NumberU64() {
+ return s.bc.CurrentBlock().NumberU64()
+ }
+
+ if s.status.pivotBlock != nil && s.bc.CurrentFastBlock().NumberU64() >= s.status.pivotBlock.NumberU64() {
+ return s.bc.CurrentFastBlock().NumberU64()
+ }
+
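+ // Before the pivot is reached, report the head according to the configured sync mode.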
+ current := uint64(0)
+ switch s.config.SyncMode {
+ case FullSync:
+ current = s.bc.CurrentBlock().NumberU64()
+ case FastSync:
+ current = s.bc.CurrentFastBlock().NumberU64()
+ case SnapSync:
+ current = s.bc.CurrentHeader().Number().Uint64()
+ }
+ return current
+}
+
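+// stateSyncStage reports whether this cycle should run the state download stage; in fast sync that is when the fast block head sits exactly one block below the pivot.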
+func (s *StagedStreamSync) stateSyncStage() bool {
+ switch s.config.SyncMode {
+ case FullSync:
+ return false
+ case FastSync:
+ return s.status.pivotBlock != nil && s.bc.CurrentFastBlock().NumberU64() == s.status.pivotBlock.NumberU64()-1
+ case SnapSync:
+ return false
+ }
+ return false
+}
+
// estimateCurrentNumber roughly estimates the current block number.
// The block number does not need to be exact, but just a temporary target of the iteration
func (s *StagedStreamSync) estimateCurrentNumber(ctx context.Context) (uint64, error) {
@@ -414,3 +532,45 @@ func (s *StagedStreamSync) estimateCurrentNumber(ctx context.Context) (uint64, e
bn := computeBlockNumberByMaxVote(cnResults)
return bn, nil
}
+
+// queryAllPeersForBlockByNumber queries all connected streams for a block by its number.
+func (s *StagedStreamSync) queryAllPeersForBlockByNumber(ctx context.Context, bn uint64) (*types.Block, error) {
+ var (
+ blkResults []*types.Block
+ lock sync.Mutex
+ wg sync.WaitGroup
+ )
+ wg.Add(s.config.Concurrency)
+ for i := 0; i != s.config.Concurrency; i++ {
+ go func() {
+ defer wg.Done()
+ block, stid, err := s.doGetBlockByNumberRequest(ctx, bn)
+ if err != nil {
+ s.logger.Err(err).Str("streamID", string(stid)).
+ Msg(WrapStagedSyncMsg("getBlockByNumber request failed"))
+ if !errors.Is(err, context.Canceled) {
+ s.protocol.StreamFailed(stid, "getBlockByNumber request failed")
+ }
+ return
+ }
+ lock.Lock()
+ blkResults = append(blkResults, block)
+ lock.Unlock()
+ }()
+ }
+ wg.Wait()
+
+ if len(blkResults) == 0 {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return nil, ErrZeroBlockResponse
+ }
+ block, err := getBlockByMaxVote(blkResults)
+ if err != nil {
+ return nil, err
+ }
+ return block, nil
+}
diff --git a/api/service/stagedstreamsync/types.go b/api/service/stagedstreamsync/types.go
index 6d6326452e..e46b614299 100644
--- a/api/service/stagedstreamsync/types.go
+++ b/api/service/stagedstreamsync/types.go
@@ -14,9 +14,12 @@ var (
)
type status struct {
- isSyncing bool
- targetBN uint64
- lock sync.Mutex
+ isSyncing bool
+ targetBN uint64
+ pivotBlock *types.Block
+ cycleSyncMode SyncMode
+ statesSynced bool
+ lock sync.Mutex
}
func newStatus() status {
diff --git a/api/service/stagedsync/stage_lastmile.go b/api/service/stagedsync/stage_lastmile.go
index df6079bd03..13fece8eec 100644
--- a/api/service/stagedsync/stage_lastmile.go
+++ b/api/service/stagedsync/stage_lastmile.go
@@ -49,7 +49,7 @@ func (lm *StageLastMile) Exec(firstCycle bool, invalidBlockRevert bool, s *Stage
if block == nil {
break
}
- err = s.state.UpdateBlockAndStatus(block, bc, true)
+ err = s.state.UpdateBlockAndStatus(block, bc)
if err != nil {
break
}
@@ -70,7 +70,7 @@ func (lm *StageLastMile) Exec(firstCycle bool, invalidBlockRevert bool, s *Stage
if block == nil {
break
}
- err = s.state.UpdateBlockAndStatus(block, bc, false)
+ err = s.state.UpdateBlockAndStatus(block, bc)
if err != nil {
break
}
diff --git a/api/service/stagedsync/stage_state.go b/api/service/stagedsync/stage_state.go
index 7086acec18..1e2b38bd97 100644
--- a/api/service/stagedsync/stage_state.go
+++ b/api/service/stagedsync/stage_state.go
@@ -178,7 +178,7 @@ func (stg *StageStates) Exec(firstCycle bool, invalidBlockRevert bool, s *StageS
headBeforeNewBlocks := stg.configs.bc.CurrentBlock().NumberU64()
headHashBeforeNewBlocks := stg.configs.bc.CurrentBlock().Hash()
_, err = stg.configs.bc.InsertChain(newBlocks, false) //TODO: verifyHeaders can be done here
- if err != nil {
+ if err != nil && !errors.Is(err, core.ErrKnownBlock) {
// TODO: handle chain rollback because of bad block
utils.Logger().Error().
Err(err).
diff --git a/api/service/stagedsync/stagedsync.go b/api/service/stagedsync/stagedsync.go
index f1de66f9fc..7959a05d29 100644
--- a/api/service/stagedsync/stagedsync.go
+++ b/api/service/stagedsync/stagedsync.go
@@ -1035,7 +1035,7 @@ func (ss *StagedSync) getBlockFromLastMileBlocksByParentHash(parentHash common.H
}
// UpdateBlockAndStatus updates block and its status in db
-func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error {
+func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error {
if block.NumberU64() != bc.CurrentBlock().NumberU64()+1 {
utils.Logger().Debug().
Uint64("curBlockNum", bc.CurrentBlock().NumberU64()).
@@ -1043,6 +1043,7 @@ func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChai
Msg("[STAGED_SYNC] Inappropriate block number, ignore!")
return nil
}
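+ // always verify all block signatures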
+ verifyAllSig := true
haveCurrentSig := len(block.GetCurrentCommitSig()) != 0
// Verify block signatures
@@ -1091,13 +1092,16 @@ func (ss *StagedSync) UpdateBlockAndStatus(block *types.Block, bc core.BlockChai
}
_, err := bc.InsertChain([]*types.Block{block}, false /* verifyHeaders */)
- if err != nil {
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case err != nil:
utils.Logger().Error().
Err(err).
Uint64("block number", block.NumberU64()).
Uint32("shard", block.ShardID()).
Msgf("[STAGED_SYNC] UpdateBlockAndStatus: Error adding new block to blockchain")
return err
+ default:
}
utils.Logger().Info().
Uint64("blockHeight", block.NumberU64()).
@@ -1218,7 +1222,11 @@ func (ss *StagedSync) addConsensusLastMile(bc core.BlockChain, cs *consensus.Con
if block == nil {
break
}
- if _, err := bc.InsertChain(types.Blocks{block}, true); err != nil {
+ _, err := bc.InsertChain(types.Blocks{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case errors.Is(err, core.ErrNotLastBlockInEpoch):
+ case err != nil:
return errors.Wrap(err, "failed to InsertChain")
}
}
diff --git a/cmd/harmony/config.go b/cmd/harmony/config.go
index 5a41f22da8..037221835c 100644
--- a/cmd/harmony/config.go
+++ b/cmd/harmony/config.go
@@ -145,6 +145,35 @@ func getDefaultSyncConfig(nt nodeconfig.NetworkType) harmonyconfig.SyncConfig {
}
}
+func getDefaultCacheConfig(nt nodeconfig.NetworkType) harmonyconfig.CacheConfig {
+ cacheConfig := harmonyconfig.CacheConfig{
+ Disabled: defaultCacheConfig.Disabled,
+ TrieNodeLimit: defaultCacheConfig.TrieNodeLimit,
+ TriesInMemory: defaultCacheConfig.TriesInMemory,
+ TrieTimeLimit: defaultCacheConfig.TrieTimeLimit,
+ SnapshotLimit: defaultCacheConfig.SnapshotLimit,
+ SnapshotWait: defaultCacheConfig.SnapshotWait,
+ Preimages: defaultCacheConfig.Preimages,
+ SnapshotNoBuild: defaultCacheConfig.SnapshotNoBuild,
+ }
+
+ switch nt {
+ case nodeconfig.Mainnet:
+ cacheConfig.Disabled = true
+ cacheConfig.Preimages = true
+ case nodeconfig.Testnet:
+ cacheConfig.Disabled = false
+ cacheConfig.Preimages = true
+ case nodeconfig.Localnet:
+ cacheConfig.Disabled = false
+ cacheConfig.Preimages = false
+ default:
+ cacheConfig.Disabled = false
+ cacheConfig.Preimages = true
+ }
+ return cacheConfig
+}
+
var configCmd = &cobra.Command{
Use: "config",
Short: "dump or update config",
diff --git a/cmd/harmony/config_migrations.go b/cmd/harmony/config_migrations.go
index 8f222b3d62..0db87d0748 100644
--- a/cmd/harmony/config_migrations.go
+++ b/cmd/harmony/config_migrations.go
@@ -334,7 +334,7 @@ func init() {
migrations["2.5.11"] = func(confTree *toml.Tree) *toml.Tree {
if confTree.Get("General.TriesInMemory") == nil {
- confTree.Set("General.TriesInMemory", defaultConfig.General.TriesInMemory)
+ confTree.Set("General.TriesInMemory", defaultConfig.Cache.TriesInMemory)
}
confTree.Set("Version", "2.5.12")
return confTree
@@ -405,6 +405,17 @@ func init() {
return confTree
}
+ migrations["2.6.0"] = func(confTree *toml.Tree) *toml.Tree {
+ confTree.Delete("General.TriesInMemory")
+
+ if confTree.Get("Cache") == nil {
+ confTree.Set("Cache", defaultConfig.Cache)
+ }
+ // upgrade minor version because of `Cache` section introduction
+ confTree.Set("Version", "2.6.1")
+ return confTree
+ }
+
// check that the latest version here is the same as in default.go
largestKey := getNextVersion(migrations)
if largestKey != tomlConfigVersion {
diff --git a/cmd/harmony/default.go b/cmd/harmony/default.go
index 986a2f7f66..22b964b997 100644
--- a/cmd/harmony/default.go
+++ b/cmd/harmony/default.go
@@ -1,13 +1,15 @@
package main
import (
+ "time"
+
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/hmy"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
)
-const tomlConfigVersion = "2.6.0"
+const tomlConfigVersion = "2.6.1"
const (
defNetworkType = nodeconfig.Mainnet
@@ -24,7 +26,6 @@ var defaultConfig = harmonyconfig.HarmonyConfig{
IsOffline: false,
DataDir: "./",
TraceEnable: false,
- TriesInMemory: 128,
},
Network: getDefaultNetworkConfig(defNetworkType),
P2P: harmonyconfig.P2pConfig{
@@ -131,6 +132,7 @@ var defaultConfig = harmonyconfig.HarmonyConfig{
LowUsageThreshold: hmy.DefaultGPOConfig.LowUsageThreshold,
BlockGasLimit: hmy.DefaultGPOConfig.BlockGasLimit,
},
+ Cache: getDefaultCacheConfig(defNetworkType),
}
var defaultSysConfig = harmonyconfig.SysConfig{
@@ -192,6 +194,7 @@ var defaultStagedSyncConfig = harmonyconfig.StagedSyncConfig{
var (
defaultMainnetSyncConfig = harmonyconfig.SyncConfig{
Enabled: false,
+ SyncMode: 0,
Downloader: false,
StagedSync: false,
StagedSyncCfg: defaultStagedSyncConfig,
@@ -207,6 +210,7 @@ var (
defaultTestNetSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
+ SyncMode: 0,
Downloader: false,
StagedSync: false,
StagedSyncCfg: defaultStagedSyncConfig,
@@ -222,6 +226,7 @@ var (
defaultLocalNetSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
+ SyncMode: 0,
Downloader: true,
StagedSync: true,
StagedSyncCfg: defaultStagedSyncConfig,
@@ -237,6 +242,7 @@ var (
defaultPartnerSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
+ SyncMode: 0,
Downloader: true,
StagedSync: false,
StagedSyncCfg: defaultStagedSyncConfig,
@@ -252,6 +258,7 @@ var (
defaultElseSyncConfig = harmonyconfig.SyncConfig{
Enabled: true,
+ SyncMode: 0,
Downloader: true,
StagedSync: false,
StagedSyncCfg: defaultStagedSyncConfig,
@@ -266,6 +273,17 @@ var (
}
)
+var defaultCacheConfig = harmonyconfig.CacheConfig{
+ Disabled: false,
+ TrieNodeLimit: 256,
+ TriesInMemory: 128,
+ TrieTimeLimit: 2 * time.Minute,
+ SnapshotLimit: 256,
+ SnapshotWait: true,
+ Preimages: true,
+ SnapshotNoBuild: false,
+}
+
const (
defaultBroadcastInvalidTx = false
)
@@ -280,6 +298,7 @@ func getDefaultHmyConfigCopy(nt nodeconfig.NetworkType) harmonyconfig.HarmonyCon
}
config.Sync = getDefaultSyncConfig(nt)
config.DNSSync = getDefaultDNSSyncConfig(nt)
+ config.Cache = getDefaultCacheConfig(nt)
return config
}
@@ -319,6 +338,11 @@ func getDefaultPrometheusConfigCopy() harmonyconfig.PrometheusConfig {
return config
}
+func getDefaultCacheConfigCopy() harmonyconfig.CacheConfig {
+ config := defaultCacheConfig
+ return config
+}
+
const (
nodeTypeValidator = "validator"
nodeTypeExplorer = "explorer"
diff --git a/cmd/harmony/flags.go b/cmd/harmony/flags.go
index 46a1decb06..a52b7138ff 100644
--- a/cmd/harmony/flags.go
+++ b/cmd/harmony/flags.go
@@ -32,7 +32,6 @@ var (
legacyDataDirFlag,
taraceFlag,
- triesInMemoryFlag,
}
dnsSyncFlags = []cli.Flag{
@@ -238,6 +237,7 @@ var (
syncFlags = []cli.Flag{
syncStreamEnabledFlag,
+ syncModeFlag,
syncDownloaderFlag,
syncStagedSyncFlag,
syncConcurrencyFlag,
@@ -267,6 +267,16 @@ var (
gpoBlockGasLimitFlag,
}
+ cacheConfigFlags = []cli.Flag{
+ cacheDisabled,
+ cacheTrieNodeLimit,
+ cacheTriesInMemory,
+ cachePreimages,
+ cacheSnapshotLimit,
+ cacheSnapshotNoBuild,
+ cacheSnapshotWait,
+ }
+
metricsFlags = []cli.Flag{
metricsETHFlag,
metricsExpensiveETHFlag,
@@ -351,11 +361,6 @@ var (
Usage: "indicates if full transaction tracing should be enabled",
DefValue: defaultConfig.General.TraceEnable,
}
- triesInMemoryFlag = cli.IntFlag{
- Name: "blockchain.tries_in_memory",
- Usage: "number of blocks from header stored in disk before exiting",
- DefValue: defaultConfig.General.TriesInMemory,
- }
)
func getRootFlags() []cli.Flag {
@@ -435,14 +440,6 @@ func applyGeneralFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig)
if cli.IsFlagChanged(cmd, isBackupFlag) {
config.General.IsBackup = cli.GetBoolFlagValue(cmd, isBackupFlag)
}
-
- if cli.IsFlagChanged(cmd, triesInMemoryFlag) {
- value := cli.GetIntFlagValue(cmd, triesInMemoryFlag)
- if value <= 2 {
- panic("Must provide number greater than 2 for General.TriesInMemory")
- }
- config.General.TriesInMemory = value
- }
}
// network flags
@@ -1876,6 +1873,13 @@ var (
Usage: "Enable the stream sync protocol (experimental feature)",
DefValue: false,
}
+
+ syncModeFlag = cli.IntFlag{
+ Name: "sync.mode",
+ Usage: "synchronization mode of the downloader (0=FullSync, 1=FastSync, 2=SnapSync)",
+ DefValue: 0,
+ }
+
// TODO: Deprecate this flag, and always set to true after stream sync is fully up.
syncDownloaderFlag = cli.BoolFlag{
Name: "sync.downloader",
@@ -1937,6 +1941,10 @@ func applySyncFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
config.Sync.Enabled = cli.GetBoolFlagValue(cmd, syncStreamEnabledFlag)
}
+ if cli.IsFlagChanged(cmd, syncModeFlag) {
+ config.Sync.SyncMode = uint32(cli.GetIntFlagValue(cmd, syncModeFlag))
+ }
+
if cli.IsFlagChanged(cmd, syncDownloaderFlag) {
config.Sync.Downloader = cli.GetBoolFlagValue(cmd, syncDownloaderFlag)
}
@@ -2103,3 +2111,70 @@ func applyGPOFlags(cmd *cobra.Command, cfg *harmonyconfig.HarmonyConfig) {
cfg.GPO.BlockGasLimit = cli.GetIntFlagValue(cmd, gpoBlockGasLimitFlag)
}
}
+
+// cache config flags
+var (
+ cacheDisabled = cli.BoolFlag{
+ Name: "cache.disabled",
+ Usage: "Whether to disable trie write caching (archive node)",
+ DefValue: defaultCacheConfig.Disabled,
+ }
+ cacheTrieNodeLimit = cli.IntFlag{
+ Name: "cache.trie_node_limit",
+ Usage: " Memory limit (MB) at which to flush the current in-memory trie to disk",
+ DefValue: defaultCacheConfig.TrieNodeLimit,
+ }
+ cacheTriesInMemory = cli.Uint64Flag{
+ Name: "cache.tries_in_memory",
+ Usage: "Block number from the head stored in disk before exiting",
+ DefValue: defaultCacheConfig.TriesInMemory,
+ }
+ cachePreimages = cli.BoolFlag{
+ Name: "cache.preimages",
+ Usage: "Whether to store preimage of trie key to the disk",
+ DefValue: defaultCacheConfig.Preimages,
+ }
+ cacheSnapshotLimit = cli.IntFlag{
+ Name: "cache.snapshot_limit",
+ Usage: "Memory allowance (MB) to use for caching snapshot entries in memory",
+ DefValue: defaultCacheConfig.SnapshotLimit,
+ }
+ cacheSnapshotNoBuild = cli.BoolFlag{
+ Name: "cache.snapshot_no_build",
+ Usage: "Whether the background generation is allowed",
+ DefValue: defaultCacheConfig.SnapshotNoBuild,
+ }
+ cacheSnapshotWait = cli.BoolFlag{
+ Name: "cache.snapshot_wait",
+ Usage: "Wait for snapshot construction on startup",
+ DefValue: defaultCacheConfig.SnapshotWait,
+ }
+)
+
+func applyCacheFlags(cmd *cobra.Command, cfg *harmonyconfig.HarmonyConfig) {
+ if cli.IsFlagChanged(cmd, cacheDisabled) {
+ cfg.Cache.Disabled = cli.GetBoolFlagValue(cmd, cacheDisabled)
+ }
+ if cli.IsFlagChanged(cmd, cacheTrieNodeLimit) {
+ cfg.Cache.TrieNodeLimit = cli.GetIntFlagValue(cmd, cacheTrieNodeLimit)
+ }
+ if cli.IsFlagChanged(cmd, cacheTriesInMemory) {
+ value := cli.GetUint64FlagValue(cmd, cacheTriesInMemory)
+ if value <= 2 {
+ panic("Must provide number greater than 2 for Cache.TriesInMemory")
+ }
+ cfg.Cache.TriesInMemory = value
+ }
+ if cli.IsFlagChanged(cmd, cachePreimages) {
+ cfg.Cache.Preimages = cli.GetBoolFlagValue(cmd, cachePreimages)
+ }
+ if cli.IsFlagChanged(cmd, cacheSnapshotLimit) {
+ cfg.Cache.SnapshotLimit = cli.GetIntFlagValue(cmd, cacheSnapshotLimit)
+ }
+ if cli.IsFlagChanged(cmd, cacheSnapshotNoBuild) {
+ cfg.Cache.SnapshotNoBuild = cli.GetBoolFlagValue(cmd, cacheSnapshotNoBuild)
+ }
+ if cli.IsFlagChanged(cmd, cacheSnapshotWait) {
+ cfg.Cache.SnapshotWait = cli.GetBoolFlagValue(cmd, cacheSnapshotWait)
+ }
+}
diff --git a/cmd/harmony/flags_test.go b/cmd/harmony/flags_test.go
index bea0e0eabe..ffe261b39a 100644
--- a/cmd/harmony/flags_test.go
+++ b/cmd/harmony/flags_test.go
@@ -37,12 +37,11 @@ func TestHarmonyFlags(t *testing.T) {
expConfig: harmonyconfig.HarmonyConfig{
Version: tomlConfigVersion,
General: harmonyconfig.GeneralConfig{
- NodeType: "validator",
- NoStaking: false,
- ShardID: -1,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 128,
+ NodeType: "validator",
+ NoStaking: false,
+ ShardID: -1,
+ IsArchival: false,
+ DataDir: "./",
},
Network: harmonyconfig.NetworkConfig{
NetworkType: "mainnet",
@@ -183,6 +182,16 @@ func TestHarmonyFlags(t *testing.T) {
LowUsageThreshold: defaultConfig.GPO.LowUsageThreshold,
BlockGasLimit: defaultConfig.GPO.BlockGasLimit,
},
+ Cache: harmonyconfig.CacheConfig{
+ Disabled: defaultConfig.Cache.Disabled,
+ TrieNodeLimit: defaultCacheConfig.TrieNodeLimit,
+ TriesInMemory: defaultConfig.Cache.TriesInMemory,
+ TrieTimeLimit: defaultConfig.Cache.TrieTimeLimit,
+ SnapshotLimit: defaultConfig.Cache.SnapshotLimit,
+ SnapshotWait: defaultConfig.Cache.SnapshotWait,
+ Preimages: defaultConfig.Cache.Preimages,
+ SnapshotNoBuild: defaultConfig.Cache.SnapshotNoBuild,
+ },
},
},
}
@@ -208,80 +217,63 @@ func TestGeneralFlags(t *testing.T) {
{
args: []string{},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "validator",
- NoStaking: false,
- ShardID: -1,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 128,
+ NodeType: "validator",
+ NoStaking: false,
+ ShardID: -1,
+ IsArchival: false,
+ DataDir: "./",
},
},
{
args: []string{"--run", "explorer", "--run.legacy", "--run.shard=0",
"--run.archive=true", "--datadir=./.hmy"},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "explorer",
- NoStaking: true,
- ShardID: 0,
- IsArchival: true,
- DataDir: "./.hmy",
- TriesInMemory: 128,
+ NodeType: "explorer",
+ NoStaking: true,
+ ShardID: 0,
+ IsArchival: true,
+ DataDir: "./.hmy",
},
},
{
args: []string{"--node_type", "explorer", "--staking", "--shard_id", "0",
"--is_archival", "--db_dir", "./"},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "explorer",
- NoStaking: false,
- ShardID: 0,
- IsArchival: true,
- DataDir: "./",
- TriesInMemory: 128,
+ NodeType: "explorer",
+ NoStaking: false,
+ ShardID: 0,
+ IsArchival: true,
+ DataDir: "./",
},
},
{
args: []string{"--staking=false", "--is_archival=false"},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "validator",
- NoStaking: true,
- ShardID: -1,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 128,
+ NodeType: "validator",
+ NoStaking: true,
+ ShardID: -1,
+ IsArchival: false,
+ DataDir: "./",
},
},
{
args: []string{"--run", "explorer", "--run.shard", "0"},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "explorer",
- NoStaking: false,
- ShardID: 0,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 128,
+ NodeType: "explorer",
+ NoStaking: false,
+ ShardID: 0,
+ IsArchival: false,
+ DataDir: "./",
},
},
{
args: []string{"--run", "explorer", "--run.shard", "0", "--run.archive=false"},
expConfig: harmonyconfig.GeneralConfig{
- NodeType: "explorer",
- NoStaking: false,
- ShardID: 0,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 128,
- },
- },
- {
- args: []string{"--blockchain.tries_in_memory", "64"},
- expConfig: harmonyconfig.GeneralConfig{
- NodeType: "validator",
- NoStaking: false,
- ShardID: -1,
- IsArchival: false,
- DataDir: "./",
- TriesInMemory: 64,
+ NodeType: "explorer",
+ NoStaking: false,
+ ShardID: 0,
+ IsArchival: false,
+ DataDir: "./",
},
},
}
@@ -1435,6 +1427,58 @@ func TestGPOFlags(t *testing.T) {
}
}
+func TestCacheFlags(t *testing.T) {
+ tests := []struct {
+ args []string
+ expConfig harmonyconfig.CacheConfig
+ expErr error
+ }{
+ {
+ args: []string{},
+ expConfig: harmonyconfig.CacheConfig{
+ Disabled: true, // based on network type
+ TrieNodeLimit: defaultCacheConfig.TrieNodeLimit,
+ TriesInMemory: defaultCacheConfig.TriesInMemory,
+ TrieTimeLimit: defaultCacheConfig.TrieTimeLimit,
+ SnapshotLimit: defaultCacheConfig.SnapshotLimit,
+ SnapshotWait: defaultCacheConfig.SnapshotWait,
+ Preimages: defaultCacheConfig.Preimages, // based on network type
+ SnapshotNoBuild: defaultCacheConfig.SnapshotNoBuild,
+ },
+ },
+ {
+ args: []string{"--cache.disabled=true", "--cache.trie_node_limit", "512", "--cache.tries_in_memory", "256", "--cache.preimages=false", "--cache.snapshot_limit", "512", "--cache.snapshot_no_build=true", "--cache.snapshot_wait=false"},
+ expConfig: harmonyconfig.CacheConfig{
+ Disabled: true,
+ TrieNodeLimit: 512,
+ TriesInMemory: 256,
+ TrieTimeLimit: 2 * time.Minute,
+ SnapshotLimit: 512,
+ SnapshotWait: false,
+ Preimages: false,
+ SnapshotNoBuild: true,
+ },
+ },
+ }
+
+ for i, test := range tests {
+ ts := newFlagTestSuite(t, cacheConfigFlags, applyCacheFlags)
+ hc, err := ts.run(test.args)
+
+ if assErr := assertError(err, test.expErr); assErr != nil {
+ t.Fatalf("Test %v: %v", i, assErr)
+ }
+ if err != nil || test.expErr != nil {
+ continue
+ }
+
+ if !reflect.DeepEqual(hc.Cache, test.expConfig) {
+ t.Errorf("Test %v:\n\t%+v\n\t%+v", i, hc.Cache, test.expConfig)
+ }
+ ts.tearDown()
+ }
+}
+
func TestDevnetFlags(t *testing.T) {
tests := []struct {
args []string
diff --git a/cmd/harmony/main.go b/cmd/harmony/main.go
index 619a1771f9..31332b2e6e 100644
--- a/cmd/harmony/main.go
+++ b/cmd/harmony/main.go
@@ -14,28 +14,12 @@ import (
"syscall"
"time"
- "github.com/harmony-one/harmony/consensus/quorum"
- "github.com/harmony-one/harmony/internal/chain"
- "github.com/harmony-one/harmony/internal/registry"
- "github.com/harmony-one/harmony/internal/shardchain/tikv_manage"
- "github.com/harmony-one/harmony/internal/tikv/redis_helper"
- "github.com/harmony-one/harmony/internal/tikv/statedb_cache"
-
- "github.com/harmony-one/harmony/api/service/crosslink_sending"
- rosetta_common "github.com/harmony-one/harmony/rosetta/common"
-
- harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
- rpc_common "github.com/harmony-one/harmony/rpc/common"
-
ethCommon "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
- "github.com/pkg/errors"
- "github.com/spf13/cobra"
-
"github.com/harmony-one/bls/ffi/go/bls"
-
"github.com/harmony-one/harmony/api/service"
+ "github.com/harmony-one/harmony/api/service/crosslink_sending"
"github.com/harmony-one/harmony/api/service/pprof"
"github.com/harmony-one/harmony/api/service/prometheus"
"github.com/harmony-one/harmony/api/service/stagedstreamsync"
@@ -43,22 +27,33 @@ import (
"github.com/harmony-one/harmony/common/fdlimit"
"github.com/harmony-one/harmony/common/ntp"
"github.com/harmony-one/harmony/consensus"
+ "github.com/harmony-one/harmony/consensus/quorum"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/hmy/downloader"
+ "github.com/harmony-one/harmony/internal/chain"
"github.com/harmony-one/harmony/internal/cli"
"github.com/harmony-one/harmony/internal/common"
+ harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
"github.com/harmony-one/harmony/internal/genesis"
"github.com/harmony-one/harmony/internal/params"
+ "github.com/harmony-one/harmony/internal/registry"
"github.com/harmony-one/harmony/internal/shardchain"
+ "github.com/harmony-one/harmony/internal/shardchain/tikv_manage"
+ "github.com/harmony-one/harmony/internal/tikv/redis_helper"
+ "github.com/harmony-one/harmony/internal/tikv/statedb_cache"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/multibls"
"github.com/harmony-one/harmony/node"
"github.com/harmony-one/harmony/numeric"
"github.com/harmony-one/harmony/p2p"
+ rosetta_common "github.com/harmony-one/harmony/rosetta/common"
+ rpc_common "github.com/harmony-one/harmony/rpc/common"
"github.com/harmony-one/harmony/shard"
"github.com/harmony-one/harmony/webhooks"
+ "github.com/pkg/errors"
+ "github.com/spf13/cobra"
)
// Host
@@ -250,6 +245,7 @@ func applyRootFlags(cmd *cobra.Command, config *harmonyconfig.HarmonyConfig) {
applySyncFlags(cmd, config)
applyShardDataFlags(cmd, config)
applyGPOFlags(cmd, config)
+ applyCacheFlags(cmd, config)
}
func setupNodeLog(config harmonyconfig.HarmonyConfig) {
@@ -788,6 +784,8 @@ func setupChain(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigTyp
}
func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfig.ConfigType, registry *registry.Registry) *node.Node {
+ decider := quorum.NewDecider(quorum.SuperMajorityVote, uint32(hc.General.ShardID))
+
// Parse minPeers from harmonyconfig.HarmonyConfig
var minPeers int
var aggregateSig bool
@@ -821,7 +819,6 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi
registry.SetCxPool(cxPool)
// Consensus object.
- decider := quorum.NewDecider(quorum.SuperMajorityVote, nodeConfig.ShardID)
registry.SetIsBackup(isBackup(hc))
currentConsensus, err := consensus.New(
myHost, nodeConfig.ShardID, nodeConfig.ConsensusPriKey, registry, decider, minPeers, aggregateSig)
@@ -866,7 +863,7 @@ func setupConsensusAndNode(hc harmonyconfig.HarmonyConfig, nodeConfig *nodeconfi
currentNode.NodeConfig.ConsensusPriKey = nodeConfig.ConsensusPriKey
// This needs to be executed after consensus setup
- if err := currentNode.InitConsensusWithValidators(); err != nil {
+ if err := currentConsensus.InitConsensusWithValidators(); err != nil {
utils.Logger().Warn().
Int("shardID", hc.General.ShardID).
Err(err).
@@ -1009,6 +1006,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har
sConfig := stagedstreamsync.Config{
ServerOnly: !hc.Sync.Downloader,
+ SyncMode: stagedstreamsync.SyncMode(hc.Sync.SyncMode),
Network: nodeconfig.NetworkType(hc.Network.NetworkType),
Concurrency: hc.Sync.Concurrency,
MinStreams: hc.Sync.MinPeers,
@@ -1020,7 +1018,7 @@ func setupStagedSyncService(node *node.Node, host p2p.Host, hc harmonyconfig.Har
SmDiscBatch: hc.Sync.DiscBatch,
UseMemDB: hc.Sync.StagedSyncCfg.UseMemDB,
LogProgress: hc.Sync.StagedSyncCfg.LogProgress,
- DebugMode: hc.Sync.StagedSyncCfg.DebugMode,
+ DebugMode: true, // hc.Sync.StagedSyncCfg.DebugMode,
}
// If we are running side chain, we will need to do some extra works for beacon
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 09bdef51ae..6f019b2a9b 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -6,19 +6,20 @@ import (
"sync/atomic"
"time"
- "github.com/harmony-one/harmony/consensus/engine"
- "github.com/harmony-one/harmony/core"
- "github.com/harmony-one/harmony/crypto/bls"
- "github.com/harmony-one/harmony/internal/registry"
-
"github.com/harmony-one/abool"
bls_core "github.com/harmony-one/bls/ffi/go/bls"
+ "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/quorum"
+ "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/crypto/bls"
bls_cosi "github.com/harmony-one/harmony/crypto/bls"
+ "github.com/harmony-one/harmony/internal/registry"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/multibls"
"github.com/harmony-one/harmony/p2p"
+ "github.com/harmony-one/harmony/shard"
+ "github.com/harmony-one/harmony/shard/committee"
"github.com/harmony-one/harmony/staking/slash"
"github.com/pkg/errors"
)
@@ -39,12 +40,13 @@ const (
AsyncProposal
)
-// VerifyBlockFunc is a function used to verify the block and keep trace of verified blocks
-type VerifyBlockFunc func(*types.Block) error
+type DownloadAsync interface {
+ DownloadAsync()
+}
// Consensus is the main struct with all states and data related to consensus process.
type Consensus struct {
- Decider quorum.Decider
+ decider quorum.Decider
// FBFTLog stores the pbft messages and blocks during FBFT process
fBFTLog *FBFTLog
// phase: different phase of FBFT protocol: pre-prepare, prepare, commit, finish etc
@@ -94,12 +96,8 @@ type Consensus struct {
// The post-consensus job func passed from Node object
// Called when consensus on a new block is done
PostConsensusJob func(*types.Block) error
- // The verifier func passed from Node object
- BlockVerifier VerifyBlockFunc
// verified block to state sync broadcast
VerifiedNewBlock chan *types.Block
- // will trigger state syncing when blockNum is low
- BlockNumLowChan chan struct{}
// Channel for DRG protocol to send pRnd (preimage of randomness resulting from combined vrf
// randomnesses) to consensus. The first 32 bytes are randomness, the rest is for bitmap.
PRndChannel chan []byte
@@ -128,7 +126,7 @@ type Consensus struct {
// finalityCounter keep tracks of the finality time
finalityCounter atomic.Value //int64
- dHelper *downloadHelper
+ dHelper DownloadAsync
// Both flags only for initialization state.
start bool
@@ -170,7 +168,7 @@ func (consensus *Consensus) Beaconchain() core.BlockChain {
return consensus.registry.GetBeaconchain()
}
-// VerifyBlock is a function used to verify the block and keep trace of verified blocks.
+// verifyBlock is a function used to verify the block and keep track of verified blocks.
func (consensus *Consensus) verifyBlock(block *types.Block) error {
if !consensus.fBFTLog.IsBlockVerified(block.Hash()) {
if err := consensus.BlockVerifier(block); err != nil {
@@ -184,21 +182,27 @@ func (consensus *Consensus) verifyBlock(block *types.Block) error {
// BlocksSynchronized lets the main loop know that block synchronization finished
// thus the blockchain is likely to be up to date.
func (consensus *Consensus) BlocksSynchronized() {
+ err := consensus.AddConsensusLastMile()
+ if err != nil {
+ consensus.GetLogger().Error().Err(err).Msg("add last mile failed")
+ }
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
consensus.syncReadyChan()
}
// BlocksNotSynchronized lets the main loop know that block is not synchronized
-func (consensus *Consensus) BlocksNotSynchronized() {
+func (consensus *Consensus) BlocksNotSynchronized(reason string) {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
- consensus.syncNotReadyChan()
+ consensus.syncNotReadyChan(reason)
}
// VdfSeedSize returns the number of VRFs for VDF computation
func (consensus *Consensus) VdfSeedSize() int {
- return int(consensus.Decider.ParticipantsCount()) * 2 / 3
+ consensus.mutex.RLock()
+ defer consensus.mutex.RUnlock()
+ return int(consensus.decider.ParticipantsCount()) * 2 / 3
}
// GetPublicKeys returns the public keys
@@ -268,20 +272,20 @@ func New(
Decider quorum.Decider, minPeers int, aggregateSig bool,
) (*Consensus, error) {
consensus := Consensus{
- mutex: &sync.RWMutex{},
- ShardID: shard,
- fBFTLog: NewFBFTLog(),
- phase: FBFTAnnounce,
- current: State{mode: Normal},
- Decider: Decider,
- registry: registry,
- MinPeers: minPeers,
- AggregateSig: aggregateSig,
- host: host,
- msgSender: NewMessageSender(host),
- BlockNumLowChan: make(chan struct{}, 1),
+ mutex: &sync.RWMutex{},
+ ShardID: shard,
+ fBFTLog: NewFBFTLog(),
+ phase: FBFTAnnounce,
+ current: State{mode: Normal},
+ decider: Decider,
+ registry: registry,
+ MinPeers: minPeers,
+ AggregateSig: aggregateSig,
+ host: host,
+ msgSender: NewMessageSender(host),
// FBFT timeout
consensusTimeout: createTimeout(),
+ dHelper: downloadAsync{},
}
if multiBLSPriKey != nil {
@@ -296,7 +300,7 @@ func New(
// viewID has to be initialized as the height of
// the blockchain during initialization as it was
// displayed on explorer as Height right now
- consensus.SetCurBlockViewID(0)
+ consensus.setCurBlockViewID(0)
consensus.SlashChan = make(chan slash.Record)
consensus.readySignal = make(chan ProposalType)
consensus.commitSigChannel = make(chan []byte)
@@ -305,11 +309,6 @@ func New(
consensus.IgnoreViewIDCheck = abool.NewBool(false)
// Make Sure Verifier is not null
consensus.vc = newViewChange()
- // TODO: reference to blockchain/beaconchain should be removed.
- verifier := VerifyNewBlock(registry.GetWebHooks(), consensus.Blockchain(), consensus.Beaconchain())
- consensus.BlockVerifier = verifier
- consensus.vc.verifyBlock = consensus.verifyBlock
-
// init prometheus metrics
initMetrics()
consensus.AddPubkeyMetrics()
@@ -324,3 +323,72 @@ func (consensus *Consensus) GetHost() p2p.Host {
func (consensus *Consensus) Registry() *registry.Registry {
return consensus.registry
}
+
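+// Decider exposes the consensus decider wrapped in a thread-safe facade guarded by the consensus mutex.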
+func (consensus *Consensus) Decider() quorum.Decider {
+ return quorum.NewThreadSafeDecider(consensus.decider, consensus.mutex)
+}
+
+// InitConsensusWithValidators initializes the shard state
+// from the latest epoch and updates the committee public
+// keys for consensus.
+func (consensus *Consensus) InitConsensusWithValidators() (err error) {
+ shardID := consensus.ShardID
+ currentBlock := consensus.Blockchain().CurrentBlock()
+ blockNum := currentBlock.NumberU64()
+ consensus.SetMode(Listening)
+ epoch := currentBlock.Epoch()
+ utils.Logger().Info().
+ Uint64("blockNum", blockNum).
+ Uint32("shardID", shardID).
+ Uint64("epoch", epoch.Uint64()).
+ Msg("[InitConsensusWithValidators] Try To Get PublicKeys")
+ shardState, err := committee.WithStakingEnabled.Compute(
+ epoch, consensus.Blockchain(),
+ )
+ if err != nil {
+ utils.Logger().Err(err).
+ Uint64("blockNum", blockNum).
+ Uint32("shardID", shardID).
+ Uint64("epoch", epoch.Uint64()).
+ Msg("[InitConsensusWithValidators] Failed getting shard state")
+ return err
+ }
+ subComm, err := shardState.FindCommitteeByID(shardID)
+ if err != nil {
+ utils.Logger().Err(err).
+ Interface("shardState", shardState).
+ Msg("[InitConsensusWithValidators] Find CommitteeByID")
+ return err
+ }
+ pubKeys, err := subComm.BLSPublicKeys()
+ if err != nil {
+ utils.Logger().Error().
+ Uint32("shardID", shardID).
+ Uint64("blockNum", blockNum).
+ Msg("[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys")
+ return errors.Wrapf(
+ err,
+ "[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys",
+ )
+ }
+
+ for _, key := range pubKeys {
+ if consensus.GetPublicKeys().Contains(key.Object) {
+ utils.Logger().Info().
+ Uint64("blockNum", blockNum).
+ Int("numPubKeys", len(pubKeys)).
+ Str("mode", consensus.Mode().String()).
+ Msg("[InitConsensusWithValidators] Successfully updated public keys")
+ consensus.UpdatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(epoch).ExternalAllowlist())
+ consensus.SetMode(Normal)
+ return nil
+ }
+ }
+ return nil
+}
+
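+// downloadAsync is a no-op implementation of the DownloadAsync interface, used as the default dHelper.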
+type downloadAsync struct {
+}
+
+func (a downloadAsync) DownloadAsync() {
+}
diff --git a/consensus/consensus_service.go b/consensus/consensus_service.go
index 23f0b47519..48324c4788 100644
--- a/consensus/consensus_service.go
+++ b/consensus/consensus_service.go
@@ -82,7 +82,7 @@ func (consensus *Consensus) UpdatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
}
func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.PublicKeyWrapper) int64 {
- consensus.Decider.UpdateParticipants(pubKeys, allowlist)
+ consensus.decider.UpdateParticipants(pubKeys, allowlist)
consensus.getLogger().Info().Msg("My Committee updated")
for i := range pubKeys {
consensus.getLogger().Info().
@@ -91,7 +91,7 @@ func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
Msg("Member")
}
- allKeys := consensus.Decider.Participants()
+ allKeys := consensus.decider.Participants()
if len(allKeys) != 0 {
consensus.LeaderPubKey = &allKeys[0]
consensus.getLogger().Info().
@@ -115,7 +115,7 @@ func (consensus *Consensus) updatePublicKeys(pubKeys, allowlist []bls_cosi.Publi
if !consensus.isViewChangingMode() {
consensus.resetViewChangeState()
}
- return consensus.Decider.ParticipantsCount()
+ return consensus.decider.ParticipantsCount()
}
// Sign on the hash of the message
@@ -144,7 +144,7 @@ func (consensus *Consensus) updateBitmaps() {
consensus.getLogger().Debug().
Str("MessageType", consensus.phase.String()).
Msg("[UpdateBitmaps] Updating consensus bitmaps")
- members := consensus.Decider.Participants()
+ members := consensus.decider.Participants()
prepareBitmap := bls_cosi.NewMask(members)
commitBitmap := bls_cosi.NewMask(members)
multiSigBitmap := bls_cosi.NewMask(members)
@@ -160,7 +160,7 @@ func (consensus *Consensus) resetState() {
consensus.blockHash = [32]byte{}
consensus.block = []byte{}
- consensus.Decider.ResetPrepareAndCommitVotes()
+ consensus.decider.ResetPrepareAndCommitVotes()
if consensus.prepareBitmap != nil {
consensus.prepareBitmap.Clear()
}
@@ -179,7 +179,7 @@ func (consensus *Consensus) IsValidatorInCommittee(pubKey bls.SerializedPublicKe
}
func (consensus *Consensus) isValidatorInCommittee(pubKey bls.SerializedPublicKey) bool {
- return consensus.Decider.IndexOf(pubKey) != -1
+ return consensus.decider.IndexOf(pubKey) != -1
}
// SetMode sets the mode of consensus
@@ -271,7 +271,7 @@ func (consensus *Consensus) setBlockNum(blockNum uint64) {
// ReadSignatureBitmapPayload read the payload for signature and bitmap; offset is the beginning position of reading
func (consensus *Consensus) ReadSignatureBitmapPayload(recvPayload []byte, offset int) (*bls_core.Sign, *bls_cosi.Mask, error) {
consensus.mutex.RLock()
- members := consensus.Decider.Participants()
+ members := consensus.decider.Participants()
consensus.mutex.RUnlock()
return consensus.readSignatureBitmapPayload(recvPayload, offset, members)
}
@@ -334,12 +334,12 @@ func (consensus *Consensus) updateConsensusInformation() Mode {
isFirstTimeStaking := consensus.Blockchain().Config().IsStaking(nextEpoch) &&
curHeader.IsLastBlockInEpoch() && !consensus.Blockchain().Config().IsStaking(curEpoch)
haventUpdatedDecider := consensus.Blockchain().Config().IsStaking(curEpoch) &&
- consensus.Decider.Policy() != quorum.SuperMajorityStake
+ consensus.decider.Policy() != quorum.SuperMajorityStake
// Only happens once, the flip-over to a new Decider policy
if isFirstTimeStaking || haventUpdatedDecider {
decider := quorum.NewDecider(quorum.SuperMajorityStake, consensus.ShardID)
- consensus.Decider = decider
+ consensus.decider = decider
}
var committeeToSet *shard.Committee
@@ -412,7 +412,7 @@ func (consensus *Consensus) updateConsensusInformation() Mode {
consensus.updatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(nextEpoch).ExternalAllowlist())
// Update voters in the committee
- if _, err := consensus.Decider.SetVoters(
+ if _, err := consensus.decider.SetVoters(
committeeToSet, epochToSet,
); err != nil {
consensus.getLogger().Error().
@@ -514,12 +514,14 @@ func (consensus *Consensus) setViewIDs(height uint64) {
// SetCurBlockViewID set the current view ID
func (consensus *Consensus) SetCurBlockViewID(viewID uint64) uint64 {
- return consensus.current.SetCurBlockViewID(viewID)
+ consensus.mutex.Lock()
+ defer consensus.mutex.Unlock()
+ return consensus.setCurBlockViewID(viewID)
}
// SetCurBlockViewID set the current view ID
-func (consensus *Consensus) setCurBlockViewID(viewID uint64) {
- consensus.current.SetCurBlockViewID(viewID)
+func (consensus *Consensus) setCurBlockViewID(viewID uint64) uint64 {
+ return consensus.current.SetCurBlockViewID(viewID)
}
// SetViewChangingID set the current view change ID
@@ -580,7 +582,7 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
return errGetPreparedBlock
}
- aggSig, mask, err := consensus.readSignatureBitmapPayload(payload, 32, consensus.Decider.Participants())
+ aggSig, mask, err := consensus.readSignatureBitmapPayload(payload, 32, consensus.decider.Participants())
if err != nil {
return errReadBitmapPayload
}
@@ -604,7 +606,7 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
continue
}
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.decider.AddNewVote(
quorum.Commit,
[]*bls_cosi.PublicKeyWrapper{key.Pub},
key.Pri.SignHash(commitPayload),
@@ -625,14 +627,18 @@ func (consensus *Consensus) selfCommit(payload []byte) error {
// NumSignaturesIncludedInBlock returns the number of signatures included in the block
func (consensus *Consensus) NumSignaturesIncludedInBlock(block *types.Block) uint32 {
count := uint32(0)
- members := consensus.Decider.Participants()
+ consensus.mutex.Lock()
+ members := consensus.decider.Participants()
+ pubKeys := consensus.getPublicKeys()
+ consensus.mutex.Unlock()
+
// TODO(audit): do not reconstruct the Mask
mask := bls.NewMask(members)
err := mask.SetMask(block.Header().LastCommitBitmap())
if err != nil {
return count
}
- for _, key := range consensus.GetPublicKeys() {
+ for _, key := range pubKeys {
if ok, err := mask.KeyEnabled(key.Bytes); err == nil && ok {
count++
}
@@ -658,40 +664,38 @@ func (consensus *Consensus) getLogger() *zerolog.Logger {
return &logger
}
-// VerifyNewBlock is called by consensus participants to verify the block (account model) they are
+// BlockVerifier is called by consensus participants to verify the block (account model) they are
// running consensus on.
-func VerifyNewBlock(hooks *webhooks.Hooks, blockChain core.BlockChain, beaconChain core.BlockChain) func(*types.Block) error {
- return func(newBlock *types.Block) error {
- if err := blockChain.ValidateNewBlock(newBlock, beaconChain); err != nil {
- switch {
- case errors.Is(err, core.ErrKnownBlock):
- return nil
- default:
- }
+func (consensus *Consensus) BlockVerifier(newBlock *types.Block) error {
+ if err := consensus.Blockchain().ValidateNewBlock(newBlock, consensus.Beaconchain()); err != nil {
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ return nil
+ default:
+ }
- if hooks := hooks; hooks != nil {
- if p := hooks.ProtocolIssues; p != nil {
- url := p.OnCannotCommit
- go func() {
- webhooks.DoPost(url, map[string]interface{}{
- "bad-header": newBlock.Header(),
- "reason": err.Error(),
- })
- }()
- }
+ if hooks := consensus.registry.GetWebHooks(); hooks != nil {
+ if p := hooks.ProtocolIssues; p != nil {
+ url := p.OnCannotCommit
+ go func() {
+ webhooks.DoPost(url, map[string]interface{}{
+ "bad-header": newBlock.Header(),
+ "reason": err.Error(),
+ })
+ }()
}
- utils.Logger().Error().
- Str("blockHash", newBlock.Hash().Hex()).
- Int("numTx", len(newBlock.Transactions())).
- Int("numStakingTx", len(newBlock.StakingTransactions())).
- Err(err).
- Msgf("[VerifyNewBlock] Cannot Verify New Block!!!, blockHeight %d, myHeight %d", newBlock.NumberU64(), blockChain.CurrentHeader().NumberU64())
- return errors.WithMessagef(err,
- "[VerifyNewBlock] Cannot Verify New Block!!! block-hash %s txn-count %d",
- newBlock.Hash().Hex(),
- len(newBlock.Transactions()),
- )
}
- return nil
+ utils.Logger().Error().
+ Str("blockHash", newBlock.Hash().Hex()).
+ Int("numTx", len(newBlock.Transactions())).
+ Int("numStakingTx", len(newBlock.StakingTransactions())).
+ Err(err).
+ Msgf("[VerifyNewBlock] Cannot Verify New Block!!!, blockHeight %d, myHeight %d", newBlock.NumberU64(), consensus.Blockchain().CurrentHeader().NumberU64())
+ return errors.WithMessagef(err,
+ "[VerifyNewBlock] Cannot Verify New Block!!! block-hash %s txn-count %d",
+ newBlock.Hash().Hex(),
+ len(newBlock.Transactions()),
+ )
}
+ return nil
}
diff --git a/consensus/consensus_test.go b/consensus/consensus_test.go
index 697ba49525..2fe524fdf8 100644
--- a/consensus/consensus_test.go
+++ b/consensus/consensus_test.go
@@ -18,7 +18,7 @@ import (
)
func TestConsensusInitialization(t *testing.T) {
- host, multiBLSPrivateKey, consensus, decider, err := GenerateConsensusForTesting()
+ host, multiBLSPrivateKey, consensus, _, err := GenerateConsensusForTesting()
assert.NoError(t, err)
messageSender := &MessageSender{host: host, retryTimes: int(phaseDuration.Seconds()) / RetryIntervalInSec}
@@ -30,10 +30,8 @@ func TestConsensusInitialization(t *testing.T) {
expectedTimeouts[timeoutViewChange] = viewChangeDuration
expectedTimeouts[timeoutBootstrap] = bootstrapDuration
- assert.Equal(t, decider, consensus.Decider)
assert.Equal(t, host, consensus.host)
assert.Equal(t, messageSender, consensus.msgSender)
- assert.IsType(t, make(chan struct{}), consensus.BlockNumLowChan)
// FBFTLog
assert.NotNil(t, consensus.FBFTLog())
@@ -65,9 +63,6 @@ func TestConsensusInitialization(t *testing.T) {
assert.IsType(t, make(chan slash.Record), consensus.SlashChan)
assert.NotNil(t, consensus.SlashChan)
- assert.IsType(t, make(chan ProposalType), consensus.GetReadySignal())
- assert.NotNil(t, consensus.GetReadySignal())
-
assert.IsType(t, make(chan [vdfAndSeedSize]byte), consensus.RndChannel)
assert.NotNil(t, consensus.RndChannel)
diff --git a/consensus/consensus_v2.go b/consensus/consensus_v2.go
index bc0d185bdf..0aec2537b4 100644
--- a/consensus/consensus_v2.go
+++ b/consensus/consensus_v2.go
@@ -10,23 +10,23 @@ import (
"github.com/ethereum/go-ethereum/common"
bls2 "github.com/harmony-one/bls/ffi/go/bls"
- "github.com/harmony-one/harmony/consensus/signature"
- "github.com/harmony-one/harmony/core"
- nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
- "github.com/harmony-one/harmony/internal/utils"
- "github.com/rs/zerolog"
-
msg_pb "github.com/harmony-one/harmony/api/proto/message"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/quorum"
+ "github.com/harmony-one/harmony/consensus/signature"
+ "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
vrf_bls "github.com/harmony-one/harmony/crypto/vrf/bls"
+ nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+ "github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/shard"
"github.com/harmony-one/vdf/src/vdf_go"
+ libp2p_peer "github.com/libp2p/go-libp2p/core/peer"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/rs/zerolog"
)
var (
@@ -55,7 +55,7 @@ func (consensus *Consensus) isViewChangingMode() bool {
}
// HandleMessageUpdate will update the consensus state according to received message
-func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, msg *msg_pb.Message, senderKey *bls.SerializedPublicKey) error {
+func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, peer libp2p_peer.ID, msg *msg_pb.Message, senderKey *bls.SerializedPublicKey) error {
consensus.mutex.Lock()
defer consensus.mutex.Unlock()
// when node is in ViewChanging mode, it still accepts normal messages into FBFTLog
@@ -91,7 +91,7 @@ func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, msg *msg_pb
case t == msg_pb.MessageType_VIEWCHANGE:
fbftMsg, err = ParseViewChangeMessage(msg)
case t == msg_pb.MessageType_NEWVIEW:
- members := consensus.Decider.Participants()
+ members := consensus.decider.Participants()
fbftMsg, err = ParseNewViewMessage(msg, members)
default:
fbftMsg, err = consensus.parseFBFTMessage(msg)
@@ -138,7 +138,7 @@ func (consensus *Consensus) HandleMessageUpdate(ctx context.Context, msg *msg_pb
}
func (consensus *Consensus) finalCommit() {
- numCommits := consensus.Decider.SignersCount(quorum.Commit)
+ numCommits := consensus.decider.SignersCount(quorum.Commit)
consensus.getLogger().Info().
Int64("NumCommits", numCommits).
@@ -178,7 +178,10 @@ func (consensus *Consensus) finalCommit() {
return
}
consensus.getLogger().Info().Hex("new", commitSigAndBitmap).Msg("[finalCommit] Overriding commit signatures!!")
- consensus.Blockchain().WriteCommitSig(block.NumberU64(), commitSigAndBitmap)
+
+ if err := consensus.Blockchain().WriteCommitSig(block.NumberU64(), commitSigAndBitmap); err != nil {
+ consensus.getLogger().Warn().Err(err).Msg("[finalCommit] failed writing commit sig")
+ }
// Send committed message before block insertion.
// if leader successfully finalizes the block, send committed message to validators
@@ -277,6 +280,8 @@ func (consensus *Consensus) BlockCommitSigs(blockNum uint64) ([]byte, error) {
return nil, nil
}
lastCommits, err := consensus.Blockchain().ReadCommitSig(blockNum)
+ consensus.mutex.Lock()
+ defer consensus.mutex.Unlock()
if err != nil ||
len(lastCommits) < bls.BLSSignatureSizeInBytes {
msgs := consensus.FBFTLog().GetMessagesByTypeSeq(
@@ -300,32 +305,26 @@ func (consensus *Consensus) BlockCommitSigs(blockNum uint64) ([]byte, error) {
func (consensus *Consensus) Start(
stopChan chan struct{},
) {
+ consensus.GetLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started")
go func() {
- consensus.getLogger().Info().Time("time", time.Now()).Msg("[ConsensusMainLoop] Consensus started")
- go func() {
- ticker := time.NewTicker(250 * time.Millisecond)
- defer ticker.Stop()
- for {
- select {
- case <-stopChan:
- return
- case <-ticker.C:
- consensus.Tick()
- }
+ ticker := time.NewTicker(250 * time.Millisecond)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-stopChan:
+ return
+ case <-ticker.C:
+ consensus.Tick()
}
- }()
-
- consensus.mutex.Lock()
- consensus.consensusTimeout[timeoutBootstrap].Start()
- consensus.getLogger().Info().Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)")
- // Set up next block due time.
- consensus.NextBlockDue = time.Now().Add(consensus.BlockPeriod)
- consensus.mutex.Unlock()
+ }
}()
- if consensus.dHelper != nil {
- consensus.dHelper.start()
- }
+ consensus.mutex.Lock()
+ consensus.consensusTimeout[timeoutBootstrap].Start()
+ consensus.getLogger().Info().Msg("[ConsensusMainLoop] Start bootstrap timeout (only once)")
+ // Set up next block due time.
+ consensus.NextBlockDue = time.Now().Add(consensus.BlockPeriod)
+ consensus.mutex.Unlock()
}
func (consensus *Consensus) StartChannel() {
@@ -363,11 +362,12 @@ func (consensus *Consensus) syncReadyChan() {
}
}
-func (consensus *Consensus) syncNotReadyChan() {
- consensus.getLogger().Info().Msg("[ConsensusMainLoop] syncNotReadyChan")
+func (consensus *Consensus) syncNotReadyChan(reason string) {
+ mode := consensus.current.Mode()
consensus.setBlockNum(consensus.Blockchain().CurrentHeader().Number().Uint64() + 1)
consensus.current.SetMode(Syncing)
- consensus.getLogger().Info().Msg("[ConsensusMainLoop] Node is OUT OF SYNC")
+ consensus.getLogger().Info().Msgf("[ConsensusMainLoop] syncNotReadyChan, prev %s, reason %s", mode.String(), reason)
+ consensus.getLogger().Info().Msgf("[ConsensusMainLoop] Node is OUT OF SYNC, reason: %s", reason)
consensusSyncCounterVec.With(prometheus.Labels{"consensus": "out_of_sync"}).Inc()
}
@@ -444,14 +444,10 @@ func (consensus *Consensus) BlockChannel(newBlock *types.Block) {
Int("numTxs", len(newBlock.Transactions())).
Int("numStakingTxs", len(newBlock.StakingTransactions())).
Time("startTime", startTime).
- Int64("publicKeys", consensus.Decider.ParticipantsCount()).
+ Int64("publicKeys", consensus.decider.ParticipantsCount()).
Msg("[ConsensusMainLoop] STARTING CONSENSUS")
consensus.announce(newBlock)
})
-
- if consensus.dHelper != nil {
- consensus.dHelper.start()
- }
}
// LastMileBlockIter is the iterator to iterate over the last mile blocks in consensus cache.
@@ -474,9 +470,6 @@ func (consensus *Consensus) GetLastMileBlockIter(bnStart uint64, cb func(iter *L
// GetLastMileBlockIter get the iterator of the last mile blocks starting from number bnStart
func (consensus *Consensus) getLastMileBlockIter(bnStart uint64, cb func(iter *LastMileBlockIter) error) error {
- if consensus.BlockVerifier == nil {
- return errors.New("consensus haven't initialized yet")
- }
blocks, _, err := consensus.getLastMileBlocksAndMsg(bnStart)
if err != nil {
return err
@@ -580,19 +573,19 @@ func (consensus *Consensus) preCommitAndPropose(blk *types.Block) error {
if _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.FBFTLog().IsBlockVerified(blk.Hash())); err != nil {
switch {
case errors.Is(err, core.ErrKnownBlock):
- consensus.getLogger().Info().Msg("[preCommitAndPropose] Block already known")
+ consensus.GetLogger().Info().Msg("[preCommitAndPropose] Block already known")
default:
- consensus.getLogger().Error().Err(err).Msg("[preCommitAndPropose] Failed to add block to chain")
+ consensus.GetLogger().Error().Err(err).Msg("[preCommitAndPropose] Failed to add block to chain")
return
}
}
-
+ consensus.mutex.Lock()
consensus.getLogger().Info().Msg("[preCommitAndPropose] Start consensus timer")
consensus.consensusTimeout[timeoutConsensus].Start()
// Send signal to Node to propose the new block for consensus
consensus.getLogger().Info().Msg("[preCommitAndPropose] sending block proposal signal")
-
+ consensus.mutex.Unlock()
consensus.ReadySignal(AsyncProposal)
}()
@@ -625,10 +618,6 @@ func (consensus *Consensus) verifyLastCommitSig(lastCommitSig []byte, blk *types
// tryCatchup add the last mile block in PBFT log memory cache to blockchain.
func (consensus *Consensus) tryCatchup() error {
- // TODO: change this to a more systematic symbol
- if consensus.BlockVerifier == nil {
- return errors.New("consensus haven't finished initialization")
- }
initBN := consensus.getBlockNum()
defer consensus.postCatchup(initBN)
@@ -667,7 +656,8 @@ func (consensus *Consensus) tryCatchup() error {
func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMessage) error {
if consensus.Blockchain().CurrentBlock().NumberU64() < blk.NumberU64() {
- if _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.fBFTLog.IsBlockVerified(blk.Hash())); err != nil {
+ _, err := consensus.Blockchain().InsertChain([]*types.Block{blk}, !consensus.fBFTLog.IsBlockVerified(blk.Hash()))
+ if err != nil && !errors.Is(err, core.ErrKnownBlock) {
consensus.getLogger().Error().Err(err).Msg("[commitBlock] Failed to add block to chain")
return err
}
@@ -692,36 +682,41 @@ func (consensus *Consensus) commitBlock(blk *types.Block, committedMsg *FBFTMess
// rotateLeader rotates the leader to the next leader in the committee.
// This function must be called with enabled leader rotation.
-func (consensus *Consensus) rotateLeader(epoch *big.Int) {
+func (consensus *Consensus) rotateLeader(epoch *big.Int, defaultKey *bls.PublicKeyWrapper) *bls.PublicKeyWrapper {
var (
- bc = consensus.Blockchain()
- prev = consensus.getLeaderPubKey()
- leader = consensus.getLeaderPubKey()
+ bc = consensus.Blockchain()
+ leader = consensus.getLeaderPubKey()
+ curBlock = bc.CurrentBlock()
+ curNumber = curBlock.NumberU64()
+ curEpoch = curBlock.Epoch().Uint64()
)
+ if epoch.Uint64() != curEpoch {
+ return defaultKey
+ }
+ const blocksCountAliveness = 4
utils.Logger().Info().Msgf("[Rotating leader] epoch: %v rotation:%v external rotation %v", epoch.Uint64(), bc.Config().IsLeaderRotationInternalValidators(epoch), bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch))
ss, err := bc.ReadShardState(epoch)
if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to read shard state")
- return
+ return defaultKey
}
committee, err := ss.FindCommitteeByID(consensus.ShardID)
if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to find committee")
- return
+ return defaultKey
}
slotsCount := len(committee.Slots)
blocksPerEpoch := shard.Schedule.InstanceForEpoch(epoch).BlocksPerEpoch()
if blocksPerEpoch == 0 {
utils.Logger().Error().Msg("[Rotating leader] blocks per epoch is 0")
- return
+ return defaultKey
}
if slotsCount == 0 {
utils.Logger().Error().Msg("[Rotating leader] slots count is 0")
- return
+ return defaultKey
}
numBlocksProducedByLeader := blocksPerEpoch / uint64(slotsCount)
- rest := blocksPerEpoch % uint64(slotsCount)
- const minimumBlocksForLeaderInRow = 3
+ const minimumBlocksForLeaderInRow = blocksCountAliveness
if numBlocksProducedByLeader < minimumBlocksForLeaderInRow {
// mine no less than 3 blocks in a row
numBlocksProducedByLeader = minimumBlocksForLeaderInRow
@@ -729,15 +724,11 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
s := bc.LeaderRotationMeta()
if !bytes.Equal(leader.Bytes[:], s.Pub) {
// Another leader.
- return
- }
- // If it is the first validator producing blocks, it should also produce the remaining 'rest' of the blocks.
- if s.Shifts == 0 {
- numBlocksProducedByLeader += rest
+ return defaultKey
}
if s.Count < numBlocksProducedByLeader {
// Not enough blocks produced by the leader, continue producing by the same leader.
- return
+ return defaultKey
}
// Passed all checks, we can change leader.
// NthNext will move the leader to the next leader in the committee.
@@ -745,31 +736,64 @@ func (consensus *Consensus) rotateLeader(epoch *big.Int) {
var (
wasFound bool
next *bls.PublicKeyWrapper
+ offset = 1
)
- if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
- wasFound, next = consensus.Decider.NthNextValidator(committee.Slots, leader, 1)
- } else {
- wasFound, next = consensus.Decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, 1)
- }
- if !wasFound {
- utils.Logger().Error().Msg("Failed to get next leader")
- return
- } else {
- consensus.setLeaderPubKey(next)
- }
- if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
- // leader changed
- go func() {
- consensus.ReadySignal(SyncProposal)
- }()
+
+ for i := 0; i < len(committee.Slots); i++ {
+ if bc.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
+ wasFound, next = consensus.decider.NthNextValidator(committee.Slots, leader, offset)
+ } else {
+ wasFound, next = consensus.decider.NthNextHmy(shard.Schedule.InstanceForEpoch(epoch), leader, offset)
+ }
+ if !wasFound {
+ utils.Logger().Error().Msg("Failed to get next leader")
+ // Seems like nothing we can do here.
+ return defaultKey
+ }
+ members := consensus.decider.Participants()
+ mask := bls.NewMask(members)
+ skipped := 0
+ for i := 0; i < blocksCountAliveness; i++ {
+ header := bc.GetHeaderByNumber(curNumber - uint64(i))
+ if header == nil {
+ utils.Logger().Error().Msgf("Failed to get header by number %d", curNumber-uint64(i))
+ return defaultKey
+ }
+ // if epoch is different, we should not check this block.
+ if header.Epoch().Uint64() != curEpoch {
+ break
+ }
+ // Populate the mask with the bitmap.
+ err = mask.SetMask(header.LastCommitBitmap())
+ if err != nil {
+ utils.Logger().Err(err).Msg("Failed to set mask")
+ return defaultKey
+ }
+ ok, err := mask.KeyEnabled(next.Bytes)
+ if err != nil {
+ utils.Logger().Err(err).Msg("Failed to get key enabled")
+ return defaultKey
+ }
+ if !ok {
+ skipped++
+ }
+ }
+
+ // The candidate signed none of the last blocksCountAliveness blocks, so skip it.
+ if skipped >= blocksCountAliveness {
+ // Move on to the next validator in the committee.
+ offset++
+ continue
+ }
+ return next
}
+ return defaultKey
}
// SetupForNewConsensus sets the state for new consensus
func (consensus *Consensus) setupForNewConsensus(blk *types.Block, committedMsg *FBFTMessage) {
atomic.StoreUint64(&consensus.blockNum, blk.NumberU64()+1)
consensus.setCurBlockViewID(committedMsg.ViewID + 1)
- consensus.LeaderPubKey = committedMsg.SenderPubkeys[0]
var epoch *big.Int
if blk.IsLastBlockInEpoch() {
epoch = new(big.Int).Add(blk.Epoch(), common.Big1)
@@ -777,7 +801,23 @@ func (consensus *Consensus) setupForNewConsensus(blk *types.Block, committedMsg
epoch = blk.Epoch()
}
if consensus.Blockchain().Config().IsLeaderRotationInternalValidators(epoch) {
- consensus.rotateLeader(epoch)
+ if next := consensus.rotateLeader(epoch, committedMsg.SenderPubkeys[0]); next != nil {
+ prev := consensus.getLeaderPubKey()
+ consensus.setLeaderPubKey(next)
+ if consensus.isLeader() {
+ utils.Logger().Info().Msgf("We are block %d, I am the new leader %s", blk.NumberU64(), next.Bytes.Hex())
+ } else {
+ utils.Logger().Info().Msgf("We are block %d, the leader is %s", blk.NumberU64(), next.Bytes.Hex())
+ }
+ if consensus.isLeader() && !consensus.getLeaderPubKey().Object.IsEqual(prev.Object) {
+ // leader changed
+ blockPeriod := consensus.BlockPeriod
+ go func() {
+ <-time.After(blockPeriod)
+ consensus.ReadySignal(SyncProposal)
+ }()
+ }
+ }
}
// Update consensus keys at last so the change of leader status doesn't mess up normal flow
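
The reworked `rotateLeader` above adds an aliveness check: before handing leadership to the next candidate, it inspects the last `blocksCountAliveness` commit bitmaps and skips any candidate whose key appears in none of them. A self-contained sketch of that selection loop, using a plain boolean bitmap in place of the BLS mask (names and inputs here are illustrative stand-ins):

```go
package main

import "fmt"

const blocksCountAliveness = 4

// pickNextLeader returns the first candidate after start (by committee offset)
// that signed at least one of the recent blocks; ok is false if none qualifies.
func pickNextLeader(signedBitmaps [][]bool, start, committeeSize int) (next int, ok bool) {
	for offset := 1; offset <= committeeSize; offset++ {
		candidate := (start + offset) % committeeSize
		skipped := 0
		for _, bitmap := range signedBitmaps {
			if candidate < len(bitmap) && !bitmap[candidate] {
				skipped++
			}
		}
		if skipped >= len(signedBitmaps) {
			continue // candidate signed nothing recently; try the next slot
		}
		return candidate, true
	}
	return 0, false
}

func main() {
	// Validator 1 missed every recent commit, validator 2 signed some.
	bitmaps := [][]bool{
		{true, false, true},
		{true, false, true},
		{true, false, false},
		{true, false, true},
	}
	next, ok := pickNextLeader(bitmaps[:blocksCountAliveness], 0, 3)
	fmt.Println(next, ok) // 2 true
}
```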
diff --git a/consensus/construct.go b/consensus/construct.go
index 10488816c7..48291a3644 100644
--- a/consensus/construct.go
+++ b/consensus/construct.go
@@ -82,7 +82,7 @@ func (consensus *Consensus) construct(
)
} else {
// TODO: use a persistent bitmap to report bitmap
- mask := bls.NewMask(consensus.Decider.Participants())
+ mask := bls.NewMask(consensus.decider.Participants())
for _, key := range priKeys {
mask.SetKey(key.Pub.Bytes, true)
}
@@ -161,7 +161,7 @@ func (consensus *Consensus) construct(
func (consensus *Consensus) constructQuorumSigAndBitmap(p quorum.Phase) []byte {
buffer := bytes.Buffer{}
// 96 bytes aggregated signature
- aggSig := consensus.Decider.AggregateVotes(p)
+ aggSig := consensus.decider.AggregateVotes(p)
buffer.Write(aggSig.Serialize())
// Bitmap
if p == quorum.Prepare {
diff --git a/consensus/construct_test.go b/consensus/construct_test.go
index 7188ebea68..c836e78224 100644
--- a/consensus/construct_test.go
+++ b/consensus/construct_test.go
@@ -81,7 +81,7 @@ func TestConstructPreparedMessage(test *testing.T) {
validatorKey := bls.SerializedPublicKey{}
validatorKey.FromLibBLSPublicKey(validatorPubKey)
validatorKeyWrapper := bls.PublicKeyWrapper{Object: validatorPubKey, Bytes: validatorKey}
- consensus.Decider.AddNewVote(
+ consensus.Decider().AddNewVote(
quorum.Prepare,
[]*bls.PublicKeyWrapper{&leaderKeyWrapper},
leaderPriKey.Sign(message),
@@ -89,7 +89,7 @@ func TestConstructPreparedMessage(test *testing.T) {
consensus.BlockNum(),
consensus.GetCurBlockViewID(),
)
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.Decider().AddNewVote(
quorum.Prepare,
[]*bls.PublicKeyWrapper{&validatorKeyWrapper},
validatorPriKey.Sign(message),
diff --git a/consensus/double_sign.go b/consensus/double_sign.go
index 3a8d559fd6..144c67bff7 100644
--- a/consensus/double_sign.go
+++ b/consensus/double_sign.go
@@ -17,7 +17,7 @@ func (consensus *Consensus) checkDoubleSign(recvMsg *FBFTMessage) bool {
if consensus.couldThisBeADoubleSigner(recvMsg) {
addrSet := map[common.Address]struct{}{}
for _, pubKey2 := range recvMsg.SenderPubkeys {
- if alreadyCastBallot := consensus.Decider.ReadBallot(
+ if alreadyCastBallot := consensus.decider.ReadBallot(
quorum.Commit, pubKey2.Bytes,
); alreadyCastBallot != nil {
for _, pubKey1 := range alreadyCastBallot.SignerPubKeys {
diff --git a/consensus/downloader.go b/consensus/downloader.go
index 26755bbd25..595d07b01d 100644
--- a/consensus/downloader.go
+++ b/consensus/downloader.go
@@ -2,6 +2,7 @@ package consensus
import (
"github.com/ethereum/go-ethereum/event"
+ "github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
"github.com/pkg/errors"
)
@@ -18,12 +19,13 @@ type downloader interface {
// Set downloader set the downloader of the shard to consensus
// TODO: It will be better to move this to consensus.New and register consensus as a service
func (consensus *Consensus) SetDownloader(d downloader) {
+ consensus.mutex.Lock()
+ defer consensus.mutex.Unlock()
consensus.dHelper = newDownloadHelper(consensus, d)
}
type downloadHelper struct {
d downloader
- c *Consensus
startedCh chan struct{}
finishedCh chan struct{}
@@ -39,51 +41,43 @@ func newDownloadHelper(c *Consensus, d downloader) *downloadHelper {
finishedCh := make(chan struct{}, 1)
finishedSub := d.SubscribeDownloadFinished(finishedCh)
- return &downloadHelper{
- c: c,
+ out := &downloadHelper{
d: d,
startedCh: startedCh,
finishedCh: finishedCh,
startedSub: startedSub,
finishedSub: finishedSub,
}
+ go out.downloadStartedLoop(c)
+ go out.downloadFinishedLoop(c)
+ return out
}
-func (dh *downloadHelper) start() {
- go dh.downloadStartedLoop()
- go dh.downloadFinishedLoop()
+func (dh *downloadHelper) DownloadAsync() {
+ dh.d.DownloadAsync()
}
-func (dh *downloadHelper) close() {
- dh.startedSub.Unsubscribe()
- dh.finishedSub.Unsubscribe()
-}
-
-func (dh *downloadHelper) downloadStartedLoop() {
+func (dh *downloadHelper) downloadStartedLoop(c *Consensus) {
for {
select {
case <-dh.startedCh:
- dh.c.BlocksNotSynchronized()
+ c.BlocksNotSynchronized("downloadStartedLoop")
case err := <-dh.startedSub.Err():
- dh.c.getLogger().Info().Err(err).Msg("consensus download finished loop closed")
+ c.GetLogger().Info().Err(err).Msg("consensus download started loop closed")
return
}
}
}
-func (dh *downloadHelper) downloadFinishedLoop() {
+func (dh *downloadHelper) downloadFinishedLoop(c *Consensus) {
for {
select {
case <-dh.finishedCh:
- err := dh.c.AddConsensusLastMile()
- if err != nil {
- dh.c.getLogger().Error().Err(err).Msg("add last mile failed")
- }
- dh.c.BlocksSynchronized()
+ c.BlocksSynchronized()
case err := <-dh.finishedSub.Err():
- dh.c.getLogger().Info().Err(err).Msg("consensus download finished loop closed")
+ c.GetLogger().Info().Err(err).Msg("consensus download finished loop closed")
return
}
}
@@ -97,7 +91,11 @@ func (consensus *Consensus) AddConsensusLastMile() error {
if block == nil {
break
}
- if _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true); err != nil {
+ _, err := consensus.Blockchain().InsertChain(types.Blocks{block}, true)
+ switch {
+ case errors.Is(err, core.ErrKnownBlock):
+ case errors.Is(err, core.ErrNotLastBlockInEpoch):
+ case err != nil:
return errors.Wrap(err, "failed to InsertChain")
}
}
@@ -107,31 +105,9 @@ func (consensus *Consensus) AddConsensusLastMile() error {
}
func (consensus *Consensus) spinUpStateSync() {
- if consensus.dHelper != nil {
- consensus.dHelper.d.DownloadAsync()
- consensus.current.SetMode(Syncing)
- for _, v := range consensus.consensusTimeout {
- v.Stop()
- }
- } else {
- select {
- case consensus.BlockNumLowChan <- struct{}{}:
- consensus.current.SetMode(Syncing)
- for _, v := range consensus.consensusTimeout {
- v.Stop()
- }
- default:
- }
- }
-}
-
-func (consensus *Consensus) spinLegacyStateSync() {
- select {
- case consensus.BlockNumLowChan <- struct{}{}:
- consensus.current.SetMode(Syncing)
- for _, v := range consensus.consensusTimeout {
- v.Stop()
- }
- default:
+ consensus.dHelper.DownloadAsync()
+ consensus.current.SetMode(Syncing)
+ for _, v := range consensus.consensusTimeout {
+ v.Stop()
}
}
diff --git a/consensus/engine/consensus_engine.go b/consensus/engine/consensus_engine.go
index 5ac2c776b0..37c4ea5901 100644
--- a/consensus/engine/consensus_engine.go
+++ b/consensus/engine/consensus_engine.go
@@ -4,9 +4,11 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/reward"
"github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/core/state/snapshot"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
@@ -23,6 +25,9 @@ type ChainReader interface {
// Config retrieves the blockchain's chain configuration.
Config() *params.ChainConfig
+ // TrieDB returns the trie database
+ TrieDB() *trie.Database
+
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
TrieNode(hash common.Hash) ([]byte, error)
@@ -62,6 +67,9 @@ type ChainReader interface {
// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash common.Hash, number uint64) *types.Block
+ // Snapshots returns the blockchain snapshot tree.
+ Snapshots() *snapshot.Tree
+
// ReadShardState retrieves sharding state given the epoch number.
// This api reads the shard state cached or saved on the chaindb.
// Thus, only should be used to read the shard state of the current chain.
diff --git a/consensus/fbft_log.go b/consensus/fbft_log.go
index 982aecab75..cec74e314b 100644
--- a/consensus/fbft_log.go
+++ b/consensus/fbft_log.go
@@ -3,6 +3,8 @@ package consensus
import (
"encoding/binary"
"fmt"
+ "hash/crc32"
+ "strconv"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -36,6 +38,19 @@ type FBFTMessage struct {
Verified bool
}
+// Hash returns a crc32 checksum of the message fields.
+func (m *FBFTMessage) Hash() []byte {
+ c := crc32.NewIEEE()
+ c.Write([]byte(strconv.FormatUint(uint64(m.MessageType), 10)))
+ c.Write([]byte(strconv.FormatUint(m.ViewID, 10)))
+ c.Write([]byte(strconv.FormatUint(m.BlockNum, 10)))
+ c.Write(m.BlockHash[:])
+ c.Write(m.Block[:])
+ c.Write(m.Payload[:])
+ return c.Sum(nil)
+}
+
// String ..
func (m *FBFTMessage) String() string {
sender := ""
diff --git a/consensus/leader.go b/consensus/leader.go
index 82ba3069bb..5740493024 100644
--- a/consensus/leader.go
+++ b/consensus/leader.go
@@ -16,6 +16,7 @@ import (
"github.com/harmony-one/harmony/p2p"
)
+// announce is called by the leader to broadcast the ANNOUNCE message for a newly proposed block
func (consensus *Consensus) announce(block *types.Block) {
blockHash := block.Hash()
@@ -61,7 +62,7 @@ func (consensus *Consensus) announce(block *types.Block) {
continue
}
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.decider.AddNewVote(
quorum.Prepare,
[]*bls.PublicKeyWrapper{key.Pub},
key.Pri.SignHash(consensus.blockHash[:]),
@@ -92,6 +93,7 @@ func (consensus *Consensus) announce(block *types.Block) {
consensus.switchPhase("Announce", FBFTPrepare)
}
+// this method is called for each prepare vote message sent by a validator
func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
// TODO(audit): make FBFT lookup using map instead of looping through all items.
if !consensus.fBFTLog.HasMatchingViewAnnounce(
@@ -110,7 +112,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
prepareBitmap := consensus.prepareBitmap
// proceed only when the message is not received before
for _, signer := range recvMsg.SenderPubkeys {
- signed := consensus.Decider.ReadBallot(quorum.Prepare, signer.Bytes)
+ signed := consensus.decider.ReadBallot(quorum.Prepare, signer.Bytes)
if signed != nil {
consensus.getLogger().Debug().
Str("validatorPubKey", signer.Bytes.Hex()).
@@ -119,14 +121,14 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
}
}
- if consensus.Decider.IsQuorumAchieved(quorum.Prepare) {
+ if consensus.decider.IsQuorumAchieved(quorum.Prepare) {
// already have enough signatures
consensus.getLogger().Debug().
Interface("validatorPubKeys", recvMsg.SenderPubkeys).
Msg("[OnPrepare] Received Additional Prepare Message")
return
}
- signerCount := consensus.Decider.SignersCount(quorum.Prepare)
+ signerCount := consensus.decider.SignersCount(quorum.Prepare)
//// Read - End
// Check BLS signature for the multi-sig
@@ -159,11 +161,11 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
consensus.getLogger().Debug().
Int64("NumReceivedSoFar", signerCount).
- Int64("PublicKeys", consensus.Decider.ParticipantsCount()).
+ Int64("PublicKeys", consensus.decider.ParticipantsCount()).
Msg("[OnPrepare] Received New Prepare Signature")
//// Write - Start
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.decider.AddNewVote(
quorum.Prepare, recvMsg.SenderPubkeys,
&sign, recvMsg.BlockHash,
recvMsg.BlockNum, recvMsg.ViewID,
@@ -179,7 +181,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
//// Write - End
//// Read - Start
- if consensus.Decider.IsQuorumAchieved(quorum.Prepare) {
+ if consensus.decider.IsQuorumAchieved(quorum.Prepare) {
// NOTE Let it handle its own logs
if err := consensus.didReachPrepareQuorum(); err != nil {
return
@@ -189,6 +191,7 @@ func (consensus *Consensus) onPrepare(recvMsg *FBFTMessage) {
//// Read - End
}
+// this method is called by the leader for each commit vote message sent by a validator
func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
//// Read - Start
if !consensus.isRightBlockNumAndViewID(recvMsg) {
@@ -196,7 +199,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
}
// proceed only when the message is not received before
for _, signer := range recvMsg.SenderPubkeys {
- signed := consensus.Decider.ReadBallot(quorum.Commit, signer.Bytes)
+ signed := consensus.decider.ReadBallot(quorum.Commit, signer.Bytes)
if signed != nil {
consensus.getLogger().Debug().
Str("validatorPubKey", signer.Bytes.Hex()).
@@ -208,9 +211,9 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
commitBitmap := consensus.commitBitmap
// has to be called before verifying signature
- quorumWasMet := consensus.Decider.IsQuorumAchieved(quorum.Commit)
+ quorumWasMet := consensus.decider.IsQuorumAchieved(quorum.Commit)
- signerCount := consensus.Decider.SignersCount(quorum.Commit)
+ signerCount := consensus.decider.SignersCount(quorum.Commit)
//// Read - End
// Verify the signature on commitPayload is correct
@@ -264,7 +267,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
return
}
*/
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.decider.AddNewVote(
quorum.Commit, recvMsg.SenderPubkeys,
&sign, recvMsg.BlockHash,
recvMsg.BlockNum, recvMsg.ViewID,
@@ -282,15 +285,7 @@ func (consensus *Consensus) onCommit(recvMsg *FBFTMessage) {
//// Read - Start
viewID := consensus.getCurBlockViewID()
- if consensus.Decider.IsAllSigsCollected() {
- logger.Info().Msg("[OnCommit] 100% Enough commits received")
- consensus.finalCommit()
-
- consensus.msgSender.StopRetry(msg_pb.MessageType_PREPARED)
- return
- }
-
- quorumIsMet := consensus.Decider.IsQuorumAchieved(quorum.Commit)
+ quorumIsMet := consensus.decider.IsQuorumAchieved(quorum.Commit)
//// Read - End
if !quorumWasMet && quorumIsMet {
diff --git a/consensus/quorum/one-node-staked-vote.go b/consensus/quorum/one-node-staked-vote.go
index e3a45540a1..2532f9691e 100644
--- a/consensus/quorum/one-node-staked-vote.go
+++ b/consensus/quorum/one-node-staked-vote.go
@@ -98,19 +98,19 @@ func (v *stakedVoteWeight) AddNewVote(
additionalVotePower = additionalVotePower.Add(votingPower)
}
- tallyQuorum := func() *tallyAndQuorum {
- switch p {
- case Prepare:
- return v.voteTally.Prepare
- case Commit:
- return v.voteTally.Commit
- case ViewChange:
- return v.voteTally.ViewChange
- default:
- // Should not happen
- return nil
- }
- }()
+ var tallyQuorum *tallyAndQuorum
+ switch p {
+ case Prepare:
+ tallyQuorum = v.voteTally.Prepare
+ case Commit:
+ tallyQuorum = v.voteTally.Commit
+ case ViewChange:
+ tallyQuorum = v.voteTally.ViewChange
+ default:
+ // Should not happen
+ return nil, errors.New("stakedVoteWeight not cache this phase")
+ }
+
tallyQuorum.tally = tallyQuorum.tally.Add(additionalVotePower)
t := v.QuorumThreshold()
@@ -163,20 +163,6 @@ func (v *stakedVoteWeight) IsQuorumAchievedByMask(mask *bls_cosi.Mask) bool {
return (*currentTotalPower).GT(threshold)
}
-func (v *stakedVoteWeight) currentTotalPower(p Phase) (*numeric.Dec, error) {
- switch p {
- case Prepare:
- return &v.voteTally.Prepare.tally, nil
- case Commit:
- return &v.voteTally.Commit.tally, nil
- case ViewChange:
- return &v.voteTally.ViewChange.tally, nil
- default:
- // Should not happen
- return nil, errors.New("wrong phase is provided")
- }
-}
-
// ComputeTotalPowerByMask computes the total power indicated by bitmap mask
func (v *stakedVoteWeight) computeTotalPowerByMask(mask *bls_cosi.Mask) *numeric.Dec {
currentTotal := numeric.ZeroDec()
diff --git a/consensus/quorum/quorum.go b/consensus/quorum/quorum.go
index aaeaab236d..3930abef12 100644
--- a/consensus/quorum/quorum.go
+++ b/consensus/quorum/quorum.go
@@ -77,7 +77,7 @@ type ParticipantTracker interface {
ParticipantsCount() int64
// NthNextValidator returns key for next validator. It assumes external validators and leader rotation.
NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
- NthNextHmy(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper)
+ NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper)
NthNextHmyExt(shardingconfig.Instance, *bls.PublicKeyWrapper, int) (bool, *bls.PublicKeyWrapper)
FirstParticipant(shardingconfig.Instance) *bls.PublicKeyWrapper
UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper)
diff --git a/consensus/quorum/thread_safe_decider.go b/consensus/quorum/thread_safe_decider.go
new file mode 100644
index 0000000000..9999325f67
--- /dev/null
+++ b/consensus/quorum/thread_safe_decider.go
@@ -0,0 +1,179 @@
+package quorum
+
+import (
+ "math/big"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ bls_core "github.com/harmony-one/bls/ffi/go/bls"
+ "github.com/harmony-one/harmony/consensus/votepower"
+ "github.com/harmony-one/harmony/crypto/bls"
+ shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
+ "github.com/harmony-one/harmony/multibls"
+ "github.com/harmony-one/harmony/numeric"
+ "github.com/harmony-one/harmony/shard"
+)
+
+var _ Decider = threadSafeDeciderImpl{}
+
+type threadSafeDeciderImpl struct {
+ mu *sync.RWMutex
+ decider Decider
+}
+
+func NewThreadSafeDecider(decider Decider, mu *sync.RWMutex) Decider {
+ return threadSafeDeciderImpl{
+ mu: mu,
+ decider: decider,
+ }
+}
+
+func (a threadSafeDeciderImpl) String() string {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.String()
+}
+
+func (a threadSafeDeciderImpl) Participants() multibls.PublicKeys {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.Participants()
+}
+
+func (a threadSafeDeciderImpl) IndexOf(key bls.SerializedPublicKey) int {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.IndexOf(key)
+}
+
+func (a threadSafeDeciderImpl) ParticipantsCount() int64 {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.ParticipantsCount()
+}
+
+func (a threadSafeDeciderImpl) NthNextValidator(slotList shard.SlotList, pubKey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.NthNextValidator(slotList, pubKey, next)
+}
+
+func (a threadSafeDeciderImpl) NthNextHmy(instance shardingconfig.Instance, pubkey *bls.PublicKeyWrapper, next int) (bool, *bls.PublicKeyWrapper) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.NthNextHmy(instance, pubkey, next)
+}
+
+func (a threadSafeDeciderImpl) NthNextHmyExt(instance shardingconfig.Instance, wrapper *bls.PublicKeyWrapper, i int) (bool, *bls.PublicKeyWrapper) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.NthNextHmyExt(instance, wrapper, i)
+}
+
+func (a threadSafeDeciderImpl) FirstParticipant(instance shardingconfig.Instance) *bls.PublicKeyWrapper {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.FirstParticipant(instance)
+}
+
+func (a threadSafeDeciderImpl) UpdateParticipants(pubKeys, allowlist []bls.PublicKeyWrapper) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ a.decider.UpdateParticipants(pubKeys, allowlist)
+}
+
+func (a threadSafeDeciderImpl) submitVote(p Phase, pubkeys []bls.SerializedPublicKey, sig *bls_core.Sign, headerHash common.Hash, height, viewID uint64) (*votepower.Ballot, error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.submitVote(p, pubkeys, sig, headerHash, height, viewID)
+}
+
+func (a threadSafeDeciderImpl) SignersCount(phase Phase) int64 {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.SignersCount(phase)
+}
+
+func (a threadSafeDeciderImpl) reset(phases []Phase) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ a.decider.reset(phases)
+}
+
+func (a threadSafeDeciderImpl) ReadBallot(p Phase, pubkey bls.SerializedPublicKey) *votepower.Ballot {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.ReadBallot(p, pubkey)
+}
+
+func (a threadSafeDeciderImpl) TwoThirdsSignersCount() int64 {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.TwoThirdsSignersCount()
+}
+
+func (a threadSafeDeciderImpl) AggregateVotes(p Phase) *bls_core.Sign {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.AggregateVotes(p)
+}
+
+func (a threadSafeDeciderImpl) SetVoters(subCommittee *shard.Committee, epoch *big.Int) (*TallyResult, error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.SetVoters(subCommittee, epoch)
+}
+
+func (a threadSafeDeciderImpl) Policy() Policy {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.Policy()
+}
+
+func (a threadSafeDeciderImpl) AddNewVote(p Phase, pubkeys []*bls.PublicKeyWrapper, sig *bls_core.Sign, headerHash common.Hash, height, viewID uint64) (*votepower.Ballot, error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.AddNewVote(p, pubkeys, sig, headerHash, height, viewID)
+}
+
+func (a threadSafeDeciderImpl) IsQuorumAchievedByMask(mask *bls.Mask) bool {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.IsQuorumAchievedByMask(mask)
+}
+
+func (a threadSafeDeciderImpl) QuorumThreshold() numeric.Dec {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.QuorumThreshold()
+}
+
+func (a threadSafeDeciderImpl) IsAllSigsCollected() bool {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.IsAllSigsCollected()
+}
+
+func (a threadSafeDeciderImpl) ResetPrepareAndCommitVotes() {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ a.decider.ResetPrepareAndCommitVotes()
+}
+
+func (a threadSafeDeciderImpl) ResetViewChangeVotes() {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ a.decider.ResetViewChangeVotes()
+}
+
+func (a threadSafeDeciderImpl) CurrentTotalPower(p Phase) (*numeric.Dec, error) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.CurrentTotalPower(p)
+}
+
+func (a threadSafeDeciderImpl) IsQuorumAchieved(p Phase) bool {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ return a.decider.IsQuorumAchieved(p)
+}
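
The new `threadSafeDeciderImpl` is a decorator that takes an externally supplied mutex, so the caller can share one lock between the decider and the rest of its state. A toy sketch of the same wrapping pattern, assuming the consensus object passes its own mutex into `NewThreadSafeDecider` (the types below are stand-ins, not the real `quorum.Decider`):

```go
package main

import (
	"fmt"
	"sync"
)

type counter interface {
	Add(n int64)
	Count() int64
}

type plainCounter struct{ total int64 }

func (c *plainCounter) Add(n int64)  { c.total += n }
func (c *plainCounter) Count() int64 { return c.total }

// safeCounter decorates a counter with a shared mutex, mirroring threadSafeDeciderImpl.
type safeCounter struct {
	mu    *sync.RWMutex
	inner counter
}

func (s safeCounter) Add(n int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.inner.Add(n)
}

func (s safeCounter) Count() int64 {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.inner.Count()
}

func main() {
	var mu sync.RWMutex // shared with the owning object in the real code
	c := safeCounter{mu: &mu, inner: &plainCounter{}}

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.Add(1) }()
	}
	wg.Wait()
	fmt.Println(c.Count()) // 100
}
```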
diff --git a/consensus/threshold.go b/consensus/threshold.go
index e611eaedcd..339f6d2a7e 100644
--- a/consensus/threshold.go
+++ b/consensus/threshold.go
@@ -57,7 +57,7 @@ func (consensus *Consensus) didReachPrepareQuorum() error {
continue
}
- if _, err := consensus.Decider.AddNewVote(
+ if _, err := consensus.decider.AddNewVote(
quorum.Commit,
[]*bls.PublicKeyWrapper{key.Pub},
key.Pri.SignHash(commitPayload),
diff --git a/consensus/validator.go b/consensus/validator.go
index 0506f4359d..fa5cdac921 100644
--- a/consensus/validator.go
+++ b/consensus/validator.go
@@ -65,7 +65,7 @@ func (consensus *Consensus) onAnnounce(msg *msg_pb.Message) {
_, err := consensus.ValidateNewBlock(recvMsg)
if err == nil {
consensus.GetLogger().Info().
- Msg("[Announce] Block verified")
+ Msgf("[Announce] Block verified %d", recvMsg.BlockNum)
}
}()
}
@@ -125,11 +125,6 @@ func (consensus *Consensus) validateNewBlock(recvMsg *FBFTMessage) (*types.Block
Hex("blockHash", recvMsg.BlockHash[:]).
Msg("[validateNewBlock] Prepared message and block added")
- if consensus.BlockVerifier == nil {
- consensus.getLogger().Debug().Msg("[validateNewBlock] consensus received message before init. Ignoring")
- return nil, errors.New("nil block verifier")
- }
-
if err := consensus.verifyBlock(&blockObj); err != nil {
consensus.getLogger().Error().Err(err).Msg("[validateNewBlock] Block verification failed")
return nil, errors.Errorf("Block verification failed: %s", err.Error())
@@ -204,12 +199,12 @@ func (consensus *Consensus) onPrepared(recvMsg *FBFTMessage) {
// check validity of prepared signature
blockHash := recvMsg.BlockHash
- aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 0, consensus.Decider.Participants())
+ aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 0, consensus.decider.Participants())
if err != nil {
consensus.getLogger().Error().Err(err).Msg("ReadSignatureBitmapPayload failed!")
return
}
- if !consensus.Decider.IsQuorumAchievedByMask(mask) {
+ if !consensus.decider.IsQuorumAchievedByMask(mask) {
consensus.getLogger().Warn().Msgf("[OnPrepared] Quorum Not achieved.")
return
}
@@ -340,7 +335,7 @@ func (consensus *Consensus) onCommitted(recvMsg *FBFTMessage) {
return
}
- aggSig, mask, err := chain.DecodeSigBitmap(sigBytes, bitmap, consensus.Decider.Participants())
+ aggSig, mask, err := chain.DecodeSigBitmap(sigBytes, bitmap, consensus.decider.Participants())
if err != nil {
consensus.getLogger().Error().Err(err).Msg("[OnCommitted] readSignatureBitmapPayload failed")
return
diff --git a/consensus/view_change.go b/consensus/view_change.go
index efc1760e84..1171b073e0 100644
--- a/consensus/view_change.go
+++ b/consensus/view_change.go
@@ -187,7 +187,7 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
// it can still sync with other validators.
if curHeader.IsLastBlockInEpoch() {
consensus.getLogger().Info().Msg("[getNextLeaderKey] view change in the first block of new epoch")
- lastLeaderPubKey = consensus.Decider.FirstParticipant(shard.Schedule.InstanceForEpoch(epoch))
+ lastLeaderPubKey = consensus.decider.FirstParticipant(shard.Schedule.InstanceForEpoch(epoch))
}
}
}
@@ -204,18 +204,18 @@ func (consensus *Consensus) getNextLeaderKey(viewID uint64, committee *shard.Com
var next *bls.PublicKeyWrapper
if blockchain != nil && blockchain.Config().IsLeaderRotationInternalValidators(epoch) {
if blockchain.Config().IsLeaderRotationExternalValidatorsAllowed(epoch) {
- wasFound, next = consensus.Decider.NthNextValidator(
+ wasFound, next = consensus.decider.NthNextValidator(
committee.Slots,
lastLeaderPubKey,
gap)
} else {
- wasFound, next = consensus.Decider.NthNextHmy(
+ wasFound, next = consensus.decider.NthNextHmy(
shard.Schedule.InstanceForEpoch(epoch),
lastLeaderPubKey,
gap)
}
} else {
- wasFound, next = consensus.Decider.NthNextHmy(
+ wasFound, next = consensus.decider.NthNextHmy(
shard.Schedule.InstanceForEpoch(epoch),
lastLeaderPubKey,
gap)
@@ -281,7 +281,7 @@ func (consensus *Consensus) startViewChange() {
defer consensus.consensusTimeout[timeoutViewChange].Start()
// update the dictionary key if the viewID is first time received
- members := consensus.Decider.Participants()
+ members := consensus.decider.Participants()
consensus.vc.AddViewIDKeyIfNotExist(nextViewID, members)
// init my own payload
@@ -290,7 +290,9 @@ func (consensus *Consensus) startViewChange() {
nextViewID,
consensus.getBlockNum(),
consensus.priKey,
- members); err != nil {
+ members,
+ consensus.verifyBlock,
+ ); err != nil {
consensus.getLogger().Error().Err(err).Msg("[startViewChange] Init Payload Error")
}
@@ -384,10 +386,10 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) {
return
}
- if consensus.Decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) {
+ if consensus.decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) {
consensus.getLogger().Info().
- Int64("have", consensus.Decider.SignersCount(quorum.ViewChange)).
- Int64("need", consensus.Decider.TwoThirdsSignersCount()).
+ Int64("have", consensus.decider.SignersCount(quorum.ViewChange)).
+ Int64("need", consensus.decider.TwoThirdsSignersCount()).
Interface("SenderPubkeys", recvMsg.SenderPubkeys).
Str("newLeaderKey", newLeaderKey.Bytes.Hex()).
Msg("[onViewChange] Received Enough View Change Messages")
@@ -402,20 +404,23 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) {
senderKey := recvMsg.SenderPubkeys[0]
// update the dictionary key if the viewID is first time received
- members := consensus.Decider.Participants()
+ members := consensus.decider.Participants()
consensus.vc.AddViewIDKeyIfNotExist(recvMsg.ViewID, members)
// do it once only per viewID/Leader
- if err := consensus.vc.InitPayload(consensus.fBFTLog,
+ if err := consensus.vc.InitPayload(
+ consensus.fBFTLog,
recvMsg.ViewID,
recvMsg.BlockNum,
consensus.priKey,
- members); err != nil {
+ members,
+ consensus.verifyBlock,
+ ); err != nil {
consensus.getLogger().Error().Err(err).Msg("[onViewChange] Init Payload Error")
return
}
- err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.Decider, recvMsg)
+ err = consensus.vc.ProcessViewChangeMsg(consensus.fBFTLog, consensus.decider, recvMsg, consensus.verifyBlock)
if err != nil {
consensus.getLogger().Error().Err(err).
Uint64("viewID", recvMsg.ViewID).
@@ -426,7 +431,7 @@ func (consensus *Consensus) onViewChange(recvMsg *FBFTMessage) {
}
// received enough view change messages, change state to normal consensus
- if consensus.Decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) && consensus.isViewChangingMode() {
+ if consensus.decider.IsQuorumAchievedByMask(consensus.vc.GetViewIDBitmap(recvMsg.ViewID)) && consensus.isViewChangingMode() {
// no previous prepared message, go straight to normal mode
// and start proposing new block
if consensus.vc.IsM1PayloadEmpty() {
@@ -483,14 +488,14 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) {
return
}
- preparedBlock, err := consensus.vc.VerifyNewViewMsg(recvMsg)
+ preparedBlock, err := consensus.vc.VerifyNewViewMsg(recvMsg, consensus.verifyBlock)
if err != nil {
consensus.getLogger().Warn().Err(err).Msg("[onNewView] Verify New View Msg Failed")
return
}
m3Mask := recvMsg.M3Bitmap
- if !consensus.Decider.IsQuorumAchievedByMask(m3Mask) {
+ if !consensus.decider.IsQuorumAchievedByMask(m3Mask) {
consensus.getLogger().Warn().
Msgf("[onNewView] Quorum Not achieved")
return
@@ -502,7 +507,7 @@ func (consensus *Consensus) onNewView(recvMsg *FBFTMessage) {
utils.CountOneBits(m3Mask.Bitmap) > utils.CountOneBits(m2Mask.Bitmap)) {
// m1 is not empty, check it's valid
blockHash := recvMsg.Payload[:32]
- aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 32, consensus.Decider.Participants())
+ aggSig, mask, err := consensus.readSignatureBitmapPayload(recvMsg.Payload, 32, consensus.decider.Participants())
if err != nil {
consensus.getLogger().Error().Err(err).
Msg("[onNewView] ReadSignatureBitmapPayload Failed")
@@ -579,5 +584,5 @@ func (consensus *Consensus) resetViewChangeState() {
Msg("[ResetViewChangeState] Resetting view change state")
consensus.current.SetMode(Normal)
consensus.vc.Reset()
- consensus.Decider.ResetViewChangeVotes()
+ consensus.decider.ResetViewChangeVotes()
}
diff --git a/consensus/view_change_construct.go b/consensus/view_change_construct.go
index de430694ae..5d25531757 100644
--- a/consensus/view_change_construct.go
+++ b/consensus/view_change_construct.go
@@ -46,7 +46,6 @@ type viewChange struct {
m1Payload []byte // message payload for type m1 := |vcBlockHash|prepared_agg_sigs|prepared_bitmap|, new leader only need one
- verifyBlock VerifyBlockFunc
viewChangeDuration time.Duration
}
@@ -152,7 +151,7 @@ func (vc *viewChange) GetM3Bitmap(viewID uint64) ([]byte, []byte) {
}
// VerifyNewViewMsg returns true if the new view message is valid
-func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage) (*types.Block, error) {
+func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage, verifyBlock func(block *types.Block) error) (*types.Block, error) {
if recvMsg.M3AggSig == nil || recvMsg.M3Bitmap == nil {
return nil, errors.New("[VerifyNewViewMsg] M3AggSig or M3Bitmap is nil")
}
@@ -215,7 +214,7 @@ func (vc *viewChange) VerifyNewViewMsg(recvMsg *FBFTMessage) (*types.Block, erro
if !bytes.Equal(preparedBlockHash[:], blockHash) {
return nil, errors.New("[VerifyNewViewMsg] Prepared block hash doesn't match msg block hash")
}
- if err := vc.verifyBlock(preparedBlock); err != nil {
+ if err := verifyBlock(preparedBlock); err != nil {
return nil, err
}
return preparedBlock, nil
@@ -239,6 +238,7 @@ func (vc *viewChange) ProcessViewChangeMsg(
fbftlog *FBFTLog,
decider quorum.Decider,
recvMsg *FBFTMessage,
+ verifyBlock func(block *types.Block) error,
) error {
preparedBlock := &types.Block{}
if !recvMsg.HasSingleSender() {
@@ -256,7 +256,7 @@ func (vc *viewChange) ProcessViewChangeMsg(
if err := rlp.DecodeBytes(recvMsg.Block, preparedBlock); err != nil {
return err
}
- if err := vc.verifyBlock(preparedBlock); err != nil {
+ if err := verifyBlock(preparedBlock); err != nil {
return err
}
_, ok := vc.bhpSigs[recvMsg.ViewID][senderKeyStr]
@@ -381,6 +381,7 @@ func (vc *viewChange) InitPayload(
blockNum uint64,
privKeys multibls.PrivateKeys,
members multibls.PublicKeys,
+ verifyBlock func(block *types.Block) error,
) error {
// m1 or m2 init once per viewID/key.
// m1 and m2 are mutually exclusive.
@@ -405,7 +406,7 @@ func (vc *viewChange) InitPayload(
hasBlock := false
if preparedMsg != nil {
if preparedBlock := fbftlog.GetBlockByHash(preparedMsg.BlockHash); preparedBlock != nil {
- if err := vc.verifyBlock(preparedBlock); err == nil {
+ if err := verifyBlock(preparedBlock); err == nil {
vc.getLogger().Info().Uint64("viewID", viewID).Uint64("blockNum", blockNum).Int("size", binary.Size(preparedBlock)).Msg("[InitPayload] add my M1 (prepared) type messaage")
msgToSign := append(preparedMsg.BlockHash[:], preparedMsg.Payload...)
for _, key := range privKeys {
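
Across `view_change.go` and `view_change_construct.go`, the block verifier is now passed into `VerifyNewViewMsg`, `ProcessViewChangeMsg`, and `InitPayload` as a parameter instead of living on the `viewChange` struct. A minimal sketch of that style, with hypothetical types, showing how a caller or test can supply a stub verifier per call:

```go
package main

import (
	"errors"
	"fmt"
)

type block struct{ num uint64 }

type viewChange struct{} // no stored verifier any more

// processMsg receives the verifier explicitly on every call.
func (vc *viewChange) processMsg(b block, verify func(block) error) error {
	if err := verify(b); err != nil {
		return err
	}
	fmt.Println("accepted block", b.num)
	return nil
}

func main() {
	vc := &viewChange{}
	ok := func(block) error { return nil }
	bad := func(block) error { return errors.New("verification failed") }

	_ = vc.processMsg(block{num: 7}, ok)
	if err := vc.processMsg(block{num: 8}, bad); err != nil {
		fmt.Println("rejected:", err)
	}
}
```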
diff --git a/consensus/view_change_test.go b/consensus/view_change_test.go
index 96d8fbc865..bbc6999445 100644
--- a/consensus/view_change_test.go
+++ b/consensus/view_change_test.go
@@ -94,7 +94,7 @@ func TestGetNextLeaderKeyShouldSucceed(t *testing.T) {
_, _, consensus, _, err := GenerateConsensusForTesting()
assert.NoError(t, err)
- assert.Equal(t, int64(0), consensus.Decider.ParticipantsCount())
+ assert.Equal(t, int64(0), consensus.Decider().ParticipantsCount())
blsKeys := []*bls_core.PublicKey{}
wrappedBLSKeys := []bls.PublicKeyWrapper{}
@@ -111,8 +111,8 @@ func TestGetNextLeaderKeyShouldSucceed(t *testing.T) {
wrappedBLSKeys = append(wrappedBLSKeys, wrapped)
}
- consensus.Decider.UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{})
- assert.Equal(t, keyCount, consensus.Decider.ParticipantsCount())
+ consensus.Decider().UpdateParticipants(wrappedBLSKeys, []bls.PublicKeyWrapper{})
+ assert.Equal(t, keyCount, consensus.Decider().ParticipantsCount())
consensus.LeaderPubKey = &wrappedBLSKeys[0]
nextKey := consensus.getNextLeaderKey(uint64(1), nil)
diff --git a/core/block_validator.go b/core/block_validator.go
index 4e097b94d0..7006068321 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -56,7 +56,7 @@ func NewBlockValidator(blockchain BlockChain) *BlockValidator {
func (v *BlockValidator) ValidateBody(block *types.Block) error {
// Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
- return ErrKnownBlock
+ return errors.WithMessage(ErrKnownBlock, "validate body: has block and state")
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
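
`ValidateBody` now wraps the `ErrKnownBlock` sentinel with context, while callers elsewhere in this diff match it with `errors.Is`. The diff uses `errors.WithMessage` (presumably `github.com/pkg/errors`); the standard-library sketch below shows the same idea with `%w` wrapping, which also keeps `errors.Is` working:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrKnownBlock = errors.New("block already known")

// validateBody wraps the sentinel with context but keeps it matchable.
func validateBody(hasBlockAndState bool) error {
	if hasBlockAndState {
		return fmt.Errorf("validate body: has block and state: %w", ErrKnownBlock)
	}
	return nil
}

func main() {
	err := validateBody(true)
	if errors.Is(err, ErrKnownBlock) {
		// Callers such as commitBlock can treat a known block as non-fatal.
		fmt.Println("known block, not an error:", err)
	}
}
```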
diff --git a/core/blockchain.go b/core/blockchain.go
index 24272a91ef..f47133bad8 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -6,6 +6,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/reward"
@@ -46,22 +47,26 @@ type Options struct {
type BlockChain interface {
// ValidateNewBlock validates new block.
ValidateNewBlock(block *types.Block, beaconChain BlockChain) error
- // SetHead rewinds the local chain to a new head. In the case of headers, everything
- // above the new head will be deleted and the new one set. In the case of blocks
- // though, the head may be further rewound if block bodies are missing (non-archive
- // nodes after a fast sync).
- SetHead(head uint64) error
// ShardID returns the shard Id of the blockchain.
ShardID() uint32
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
CurrentBlock() *types.Block
+ // CurrentFastBlock retrieves the current fast-sync head block of the canonical
+ // chain. The block is retrieved from the blockchain's internal cache.
+ CurrentFastBlock() *types.Block
+ // Validator returns the current validator.
+ Validator() Validator
// Processor returns the current processor.
Processor() Processor
// State returns a new mutable state based on the current HEAD block.
State() (*state.DB, error)
// StateAt returns a new mutable state based on a particular point in time.
StateAt(root common.Hash) (*state.DB, error)
+ // Snapshots returns the blockchain snapshot tree.
+ Snapshots() *snapshot.Tree
+ // TrieDB returns the blockchain's trie database.
+ TrieDB() *trie.Database
// HasBlock checks if a block is fully present in the database or not.
HasBlock(hash common.Hash, number uint64) bool
// HasState checks if state trie is fully present in the database or not.
@@ -100,10 +105,12 @@ type BlockChain interface {
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
Rollback(chain []common.Hash) error
+ // WriteHeadBlock writes a new head block.
+ WriteHeadBlock(block *types.Block) error
// WriteBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty.
- WriteBlockWithoutState(block *types.Block, td *big.Int) (err error)
+ WriteBlockWithoutState(block *types.Block) (err error)
// WriteBlockWithState writes the block and all associated state to the database.
WriteBlockWithState(
block *types.Block, receipts []*types.Receipt,
@@ -121,7 +128,10 @@ type BlockChain interface {
//
// After insertion is done, all accumulated events will be fired.
InsertChain(chain types.Blocks, verifyHeaders bool) (int, error)
- // LeaderRotationMeta returns info about leader rotation.
+ // InsertReceiptChain attempts to complete an already existing header chain with
+ // transaction and receipt data.
+ InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error)
+ // LeaderRotationMeta returns info about leader rotation, including the number of continuous blocks produced by the same leader.
LeaderRotationMeta() LeaderRotationMeta
// BadBlocks returns a list of the last 'bad blocks' that
// the client has seen on the network.
@@ -162,8 +172,6 @@ type BlockChain interface {
WriteShardStateBytes(db rawdb.DatabaseWriter,
epoch *big.Int, shardState []byte,
) (*shard.State, error)
- // WriteHeadBlock writes head block.
- WriteHeadBlock(block *types.Block) error
// ReadCommitSig retrieves the commit signature on a block.
ReadCommitSig(blockNum uint64) ([]byte, error)
// WriteCommitSig saves the commits signatures signed on a block.
@@ -174,20 +182,8 @@ type BlockChain interface {
GetVrfByNumber(number uint64) []byte
// ChainDb returns the database.
ChainDb() ethdb.Database
- // GetEpochBlockNumber returns the first block number of the given epoch.
- GetEpochBlockNumber(epoch *big.Int) (*big.Int, error)
- // StoreEpochBlockNumber stores the given epoch-first block number.
- StoreEpochBlockNumber(
- epoch *big.Int, blockNum *big.Int,
- ) error
// ReadEpochVrfBlockNums retrieves block numbers with valid VRF for the specified epoch.
ReadEpochVrfBlockNums(epoch *big.Int) ([]uint64, error)
- // WriteEpochVrfBlockNums saves block numbers with valid VRF for the specified epoch.
- WriteEpochVrfBlockNums(epoch *big.Int, vrfNumbers []uint64) error
- // ReadEpochVdfBlockNum retrieves block number with valid VDF for the specified epoch.
- ReadEpochVdfBlockNum(epoch *big.Int) (*big.Int, error)
- // WriteEpochVdfBlockNum saves block number with valid VDF for the specified epoch.
- WriteEpochVdfBlockNum(epoch *big.Int, blockNum *big.Int) error
// WriteCrossLinks saves the hashes of crosslinks by shardID and blockNum combination key.
WriteCrossLinks(batch rawdb.DatabaseWriter, cls []types.CrossLink) error
// DeleteCrossLinks removes the hashes of crosslinks by shardID and blockNum combination key.
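
The interface changes above drop `SetHead` and the epoch VRF/VDF writers, and expose fast-sync-oriented entry points instead: `CurrentFastBlock`, `Validator`, `Snapshots`, `TrieDB`, `WriteHeadBlock`, `InsertReceiptChain`, and a `WriteBlockWithoutState` without the total-difficulty argument. A hedged sketch of how a hypothetical sync client might drive these methods; the types and the `backfill` helper below are illustrative, not part of this change:

```go
package main

import "fmt"

// Block and Receipts are stand-ins for core/types.Block and types.Receipts.
type Block struct{ Num uint64 }
type Receipts []struct{}

// fastSyncChain captures only the BlockChain methods this sketch touches.
type fastSyncChain interface {
	WriteBlockWithoutState(block *Block) error
	InsertReceiptChain(blocks []*Block, receipts []Receipts) (int, error)
	WriteHeadBlock(block *Block) error
}

// backfill is a hypothetical helper: persist bodies without state, attach
// receipts, then promote the last block to head.
func backfill(bc fastSyncChain, blocks []*Block, receipts []Receipts) error {
	for _, b := range blocks {
		if err := bc.WriteBlockWithoutState(b); err != nil {
			return err
		}
	}
	if _, err := bc.InsertReceiptChain(blocks, receipts); err != nil {
		return err
	}
	return bc.WriteHeadBlock(blocks[len(blocks)-1])
}

// fakeChain is a no-op implementation used only to make the sketch runnable.
type fakeChain struct{}

func (fakeChain) WriteBlockWithoutState(b *Block) error { fmt.Println("body", b.Num); return nil }
func (fakeChain) InsertReceiptChain(bs []*Block, _ []Receipts) (int, error) { return len(bs), nil }
func (fakeChain) WriteHeadBlock(b *Block) error { fmt.Println("head", b.Num); return nil }

func main() {
	blocks := []*Block{{Num: 1}, {Num: 2}}
	fmt.Println(backfill(fakeChain{}, blocks, make([]Receipts, len(blocks))))
}
```
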
diff --git a/core/blockchain_impl.go b/core/blockchain_impl.go
index 3b5bc6bb10..d71371986e 100644
--- a/core/blockchain_impl.go
+++ b/core/blockchain_impl.go
@@ -31,6 +31,8 @@ import (
"sync/atomic"
"time"
+ "github.com/pkg/errors"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
@@ -66,7 +68,6 @@ import (
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
lru "github.com/hashicorp/golang-lru"
- "github.com/pkg/errors"
)
var (
@@ -90,8 +91,9 @@ var (
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
// ErrNoGenesis is the error when there is no genesis.
- ErrNoGenesis = errors.New("Genesis not found in chain")
- ErrEmptyChain = errors.New("empty chain")
+ ErrNoGenesis = errors.New("Genesis not found in chain")
+ ErrEmptyChain = errors.New("empty chain")
+ ErrNotLastBlockInEpoch = errors.New("not last block in epoch")
// errExceedMaxPendingSlashes ..
errExceedMaxPendingSlashes = errors.New("exceeed max pending slashes")
errNilEpoch = errors.New("nil epoch for voting power computation")
@@ -103,8 +105,6 @@ const (
bodyCacheLimit = 128
blockCacheLimit = 128
receiptsCacheLimit = 32
- maxFutureBlocks = 16
- maxTimeFutureBlocks = 30
badBlockLimit = 10
triesInRedis = 1000
shardCacheLimit = 10
@@ -184,9 +184,7 @@ type BlockChainImpl struct {
scope event.SubscriptionScope
genesisBlock *types.Block
- mu sync.RWMutex // global mutex for locking chain operations
chainmu sync.RWMutex // blockchain insertion lock
- procmu sync.RWMutex // block processor lock
pendingCrossLinksMutex sync.RWMutex // pending crosslinks lock
pendingSlashingCandidatesMU sync.RWMutex // pending slashing candidates
@@ -198,7 +196,6 @@ type BlockChainImpl struct {
bodyRLPCache *lru.Cache // Cache for the most recent block bodies in RLP encoded format
receiptsCache *lru.Cache // Cache for the most recent receipts per block
blockCache *lru.Cache // Cache for the most recent entire blocks
- futureBlocks *lru.Cache // future blocks are blocks added for later processing
shardStateCache *lru.Cache
lastCommitsCache *lru.Cache
epochCache *lru.Cache // Cache epoch number → first block number
@@ -270,7 +267,6 @@ func newBlockChainWithOptions(
bodyRLPCache, _ := lru.New(bodyCacheLimit)
receiptsCache, _ := lru.New(receiptsCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
- futureBlocks, _ := lru.New(maxFutureBlocks)
badBlocks, _ := lru.New(badBlockLimit)
shardCache, _ := lru.New(shardCacheLimit)
commitsCache, _ := lru.New(commitsCacheLimit)
@@ -296,7 +292,6 @@ func newBlockChainWithOptions(
bodyRLPCache: bodyRLPCache,
receiptsCache: receiptsCache,
blockCache: blockCache,
- futureBlocks: futureBlocks,
shardStateCache: shardCache,
lastCommitsCache: commitsCache,
epochCache: epochCache,
@@ -359,7 +354,11 @@ func newBlockChainWithOptions(
NoBuild: bc.cacheConfig.SnapshotNoBuild,
AsyncBuild: !bc.cacheConfig.SnapshotWait,
}
- bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Hash())
+ fmt.Println("loading/generating snapshot...")
+ utils.Logger().Info().
+ Str("Root", head.Root().Hex()).
+ Msg("loading/generating snapshot")
+ bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root())
}
curHeader := bc.CurrentBlock().Header()
@@ -373,9 +372,6 @@ func newBlockChainWithOptions(
return nil, errors.WithMessage(err, "failed to write pre-image start end blocks")
}
}
-
- // Take ownership of this particular state
- go bc.update()
return bc, nil
}
@@ -587,14 +583,14 @@ func (bc *BlockChainImpl) loadLastState() error {
if head == (common.Hash{}) {
// Corrupt or empty database, init from scratch
utils.Logger().Warn().Msg("Empty database, resetting chain")
- return bc.Reset()
+ return bc.reset()
}
// Make sure the entire head block is available
currentBlock := bc.GetBlockByHash(head)
if currentBlock == nil {
// Corrupt or empty database, init from scratch
utils.Logger().Warn().Str("hash", head.Hex()).Msg("Head block missing, resetting chain")
- return bc.Reset()
+ return bc.reset()
}
// Make sure the state associated with the block is available
if _, err := state.New(currentBlock.Root(), bc.stateCache, bc.snaps); err != nil {
@@ -664,12 +660,9 @@ func (bc *BlockChainImpl) loadLastState() error {
return nil
}
-func (bc *BlockChainImpl) SetHead(head uint64) error {
+func (bc *BlockChainImpl) setHead(head uint64) error {
utils.Logger().Warn().Uint64("target", head).Msg("Rewinding blockchain")
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
// Rewind the header chain, deleting all block bodies until then
delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) error {
return rawdb.DeleteBody(db, hash, num)
@@ -684,7 +677,6 @@ func (bc *BlockChainImpl) SetHead(head uint64) error {
bc.bodyRLPCache.Purge()
bc.receiptsCache.Purge()
bc.blockCache.Purge()
- bc.futureBlocks.Purge()
bc.shardStateCache.Purge()
// Rewind the block chain, ensuring we don't end up with a stateless head block
@@ -742,9 +734,12 @@ func (bc *BlockChainImpl) CurrentFastBlock() *types.Block {
return bc.currentFastBlock.Load().(*types.Block)
}
+// Validator returns the current validator.
+func (bc *BlockChainImpl) Validator() Validator {
+ return bc.validator
+}
+
func (bc *BlockChainImpl) Processor() Processor {
- bc.procmu.RLock()
- defer bc.procmu.RUnlock()
return bc.processor
}
@@ -761,17 +756,15 @@ func (bc *BlockChainImpl) Snapshots() *snapshot.Tree {
return bc.snaps
}
-func (bc *BlockChainImpl) Reset() error {
- return bc.ResetWithGenesisBlock(bc.genesisBlock)
+func (bc *BlockChainImpl) reset() error {
+ return bc.resetWithGenesisBlock(bc.genesisBlock)
}
-func (bc *BlockChainImpl) ResetWithGenesisBlock(genesis *types.Block) error {
+func (bc *BlockChainImpl) resetWithGenesisBlock(genesis *types.Block) error {
// Dump the entire block chain and purge the caches
- if err := bc.SetHead(0); err != nil {
+ if err := bc.setHead(0); err != nil {
return err
}
- bc.mu.Lock()
- defer bc.mu.Unlock()
// Prepare the genesis block and reinitialise the chain
if err := rawdb.WriteBlock(bc.db, genesis); err != nil {
@@ -793,6 +786,20 @@ func (bc *BlockChainImpl) ResetWithGenesisBlock(genesis *types.Block) error {
return nil
}
+func (bc *BlockChainImpl) repairRecreateStateTries(head **types.Block) error {
+ for {
+ blk := bc.GetBlockByNumber((*head).NumberU64() + 1)
+ if blk != nil {
+ _, _, _, err := bc.insertChain([]*types.Block{blk}, true)
+ if err != nil {
+ return err
+ }
+ *head = blk
+ continue
+ }
+ return nil
+ }
+}
+
// repair tries to repair the current blockchain by rolling back the current block
// until one with associated state is found. This is needed to fix incomplete db
// writes caused either by crashes/power outages, or simply non-committed tries.
@@ -800,6 +807,16 @@ func (bc *BlockChainImpl) ResetWithGenesisBlock(genesis *types.Block) error {
// This method only rolls back the current block. The current header and current
// fast block are left intact.
func (bc *BlockChainImpl) repair(head **types.Block) error {
+ if err := bc.repairValidatorsAndCommitSigs(head); err != nil {
+ return errors.WithMessage(err, "failed to repair validators and commit sigs")
+ }
+ if err := bc.repairRecreateStateTries(head); err != nil {
+ return errors.WithMessage(err, "failed to recreate state tries")
+ }
+ return nil
+}
+
+func (bc *BlockChainImpl) repairValidatorsAndCommitSigs(head **types.Block) error {
valsToRemove := map[common.Address]struct{}{}
for {
// Abort if we've rewound to a head block that does have associated state
@@ -808,6 +825,9 @@ func (bc *BlockChainImpl) repair(head **types.Block) error {
Str("number", (*head).Number().String()).
Str("hash", (*head).Hash().Hex()).
Msg("Rewound blockchain to past state")
+ if err := rawdb.WriteHeadBlockHash(bc.db, (*head).Hash()); err != nil {
+ return errors.WithMessagef(err, "failed to write head block hash number %d", (*head).NumberU64())
+ }
return bc.removeInValidatorList(valsToRemove)
}
// Repair last commit sigs
@@ -815,6 +835,14 @@ func (bc *BlockChainImpl) repair(head **types.Block) error {
sigAndBitMap := append(lastSig[:], (*head).Header().LastCommitBitmap()...)
bc.WriteCommitSig((*head).NumberU64()-1, sigAndBitMap)
+ err := rawdb.DeleteBlock(bc.db, (*head).Hash(), (*head).NumberU64())
+ if err != nil {
+ return errors.WithMessagef(err, "failed to delete block %d", (*head).NumberU64())
+ }
+ if err := rawdb.WriteHeadBlockHash(bc.db, (*head).ParentHash()); err != nil {
+ return errors.WithMessagef(err, "failed to write head block hash number %d", (*head).NumberU64()-1)
+ }
+
// Otherwise rewind one block and recheck state availability there
for _, stkTxn := range (*head).StakingTransactions() {
if stkTxn.StakingType() == staking.DirectiveCreateValidator {
@@ -862,8 +890,8 @@ func (bc *BlockChainImpl) Export(w io.Writer) error {
// ExportN writes a subset of the active chain to the given writer.
func (bc *BlockChainImpl) ExportN(w io.Writer, first uint64, last uint64) error {
- bc.mu.RLock()
- defer bc.mu.RUnlock()
+ bc.chainmu.RLock()
+ defer bc.chainmu.RUnlock()
if first > last {
return fmt.Errorf("export failed: first (%d) is greater than last (%d)", first, last)
@@ -908,6 +936,23 @@ func (bc *BlockChainImpl) writeHeadBlock(block *types.Block) error {
if err := rawdb.WriteHeadBlockHash(batch, block.Hash()); err != nil {
return err
}
+ if err := rawdb.WriteHeadHeaderHash(batch, block.Hash()); err != nil {
+ return err
+ }
+ if err := rawdb.WriteHeaderNumber(batch, block.Hash(), block.NumberU64()); err != nil {
+ return err
+ }
+
+ isNewEpoch := block.IsLastBlockInEpoch()
+ if isNewEpoch {
+ epoch := block.Header().Epoch()
+ nextEpoch := epoch.Add(epoch, common.Big1)
+ if err := rawdb.WriteShardStateBytes(batch, nextEpoch, block.Header().ShardState()); err != nil {
+ utils.Logger().Error().Err(err).Msg("failed to store shard state")
+ return err
+ }
+ }
+
if err := batch.Write(); err != nil {
return err
}
@@ -1122,6 +1167,11 @@ func (bc *BlockChainImpl) GetUnclesInChain(b *types.Block, length int) []*block.
return uncles
}
+// TrieDB returns the blockchain's trie database.
+func (bc *BlockChainImpl) TrieDB() *trie.Database {
+ return bc.stateCache.TrieDB()
+}
+
// TrieNode retrieves a blob of data associated with a trie node (or code hash)
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChainImpl) TrieNode(hash common.Hash) ([]byte, error) {
@@ -1211,23 +1261,6 @@ func (bc *BlockChainImpl) Stop() {
utils.Logger().Info().Msg("Blockchain manager stopped")
}
-func (bc *BlockChainImpl) procFutureBlocks() {
- blocks := make([]*types.Block, 0, bc.futureBlocks.Len())
- for _, hash := range bc.futureBlocks.Keys() {
- if block, exist := bc.futureBlocks.Peek(hash); exist {
- blocks = append(blocks, block.(*types.Block))
- }
- }
- if len(blocks) > 0 {
- types.BlockBy(types.Number).Sort(blocks)
-
- // Insert one by one as chain insertion needs contiguous ancestry between blocks
- for i := range blocks {
- bc.InsertChain(blocks[i:i+1], true /* verifyHeaders */)
- }
- }
-}
-
// WriteStatus status of write
type WriteStatus byte
@@ -1239,8 +1272,8 @@ const (
)
func (bc *BlockChainImpl) Rollback(chain []common.Hash) error {
- bc.mu.Lock()
- defer bc.mu.Unlock()
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
valsToRemove := map[common.Address]struct{}{}
for i := len(chain) - 1; i >= 0; i-- {
@@ -1349,8 +1382,6 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty
// InsertReceiptChain attempts to complete an already existing header chain with
// transaction and receipt data.
-// Deprecated: no usages of this function found.
-// TODO: should be removed
func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(blockChain); i++ {
@@ -1380,11 +1411,14 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha
receipts := receiptChain[i]
// Short circuit insertion if shutting down or processing failed
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
- return 0, nil
+ return 0, fmt.Errorf("Premature abort during blocks processing")
}
- // Short circuit if the owner header is unknown
+ // Add header if the owner header is unknown
if !bc.HasHeader(block.Hash(), block.NumberU64()) {
- return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
+ if err := rawdb.WriteHeader(batch, block.Header()); err != nil {
+ return 0, err
+ }
+ // return 0, fmt.Errorf("containing header #%d [%x…] unknown", block.Number(), block.Hash().Bytes()[:4])
}
// Skip if the entire data is already known
if bc.HasBlock(block.Hash(), block.NumberU64()) {
@@ -1409,6 +1443,17 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha
return 0, err
}
+ isNewEpoch := block.IsLastBlockInEpoch()
+ if isNewEpoch {
+ epoch := block.Header().Epoch()
+ nextEpoch := epoch.Add(epoch, common.Big1)
+ err := rawdb.WriteShardStateBytes(batch, nextEpoch, block.Header().ShardState())
+ if err != nil {
+ utils.Logger().Error().Err(err).Msg("failed to store shard state")
+ return 0, err
+ }
+ }
+
stats.processed++
if batch.ValueSize() >= ethdb.IdealBatchSize {
@@ -1427,17 +1472,9 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha
}
// Update the head fast sync block if better
- bc.mu.Lock()
head := blockChain[len(blockChain)-1]
- if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
- currentFastBlock := bc.CurrentFastBlock()
- if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
- rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
- bc.currentFastBlock.Store(head)
- headFastBlockGauge.Update(int64(head.NumberU64()))
- }
- }
- bc.mu.Unlock()
+ rawdb.WriteHeadFastBlockHash(bc.db, head.Hash())
+ bc.currentFastBlock.Store(head)
utils.Logger().Info().
Int32("count", stats.processed).
@@ -1449,18 +1486,15 @@ func (bc *BlockChainImpl) InsertReceiptChain(blockChain types.Blocks, receiptCha
Int32("ignored", stats.ignored).
Msg("Imported new block receipts")
- return 0, nil
+ return int(stats.processed), nil
}
var lastWrite uint64
-func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
+func (bc *BlockChainImpl) WriteBlockWithoutState(block *types.Block) (err error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
- return err
- }
if err := rawdb.WriteBlock(bc.db, block); err != nil {
return err
}
@@ -1475,10 +1509,6 @@ func (bc *BlockChainImpl) WriteBlockWithState(
paid reward.Reader,
state *state.DB,
) (status WriteStatus, err error) {
- // Make sure no inconsistent state is leaked during insertion
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
currentBlock := bc.CurrentBlock()
if currentBlock == nil {
return NonStatTy, errors.New("Current block is nil")
@@ -1619,7 +1649,6 @@ func (bc *BlockChainImpl) WriteBlockWithState(
return NonStatTy, errors.Wrap(err, "writeHeadBlock")
}
- bc.futureBlocks.Remove(block.Hash())
return CanonStatTy, nil
}
@@ -1641,6 +1670,8 @@ func (bc *BlockChainImpl) InsertChain(chain types.Blocks, verifyHeaders bool) (i
}
prevHash := bc.CurrentBlock().Hash()
+ bc.chainmu.Lock()
+ defer bc.chainmu.Unlock()
n, events, logs, err := bc.insertChain(chain, verifyHeaders)
bc.PostChainEvents(events, logs)
if err == nil {
@@ -1694,9 +1725,6 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
}
}
- bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
-
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
// acquiring.
@@ -1745,31 +1773,16 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
err = NewBlockValidator(bc).ValidateBody(block)
}
switch {
- case err == ErrKnownBlock:
- // Block and state both already known. However if the current block is below
- // this number we did a rollback and we should reimport it nonetheless.
- if bc.CurrentBlock().NumberU64() >= block.NumberU64() {
- stats.ignored++
- continue
- }
+ case errors.Is(err, ErrKnownBlock):
+ return i, events, coalescedLogs, err
case err == consensus_engine.ErrFutureBlock:
- // Allow up to MaxFuture second in the future blocks. If this limit is exceeded
- // the chain is discarded and processed at a later time if given.
- max := big.NewInt(time.Now().Unix() + maxTimeFutureBlocks)
- if block.Time().Cmp(max) > 0 {
- return i, events, coalescedLogs, fmt.Errorf("future block: %v > %v", block.Time(), max)
- }
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
+ return i, events, coalescedLogs, err
- case err == consensus_engine.ErrUnknownAncestor && bc.futureBlocks.Contains(block.ParentHash()):
- bc.futureBlocks.Add(block.Hash(), block)
- stats.queued++
- continue
+ case errors.Is(err, consensus_engine.ErrUnknownAncestor):
+ return i, events, coalescedLogs, err
- case err == consensus_engine.ErrPrunedAncestor:
+ case errors.Is(err, consensus_engine.ErrPrunedAncestor):
// TODO: add fork choice mechanism
// Block competing with the canonical chain, store in the db, but don't process
// until the competitor TD goes above the canonical TD
@@ -1796,9 +1809,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
// Prune in case non-empty winner chain
if len(winner) > 0 {
// Import all the pruned blocks to make the state available
- bc.chainmu.Unlock()
_, evs, logs, err := bc.insertChain(winner, true /* verifyHeaders */)
- bc.chainmu.Lock()
events, coalescedLogs = evs, logs
if err != nil {
@@ -1900,7 +1911,7 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
switch status {
case CanonStatTy:
- logger.Info().Msg("Inserted new block")
+ logger.Info().Msgf("Inserted new block s: %d e: %d n:%d", block.ShardID(), block.Epoch().Uint64(), block.NumberU64())
coalescedLogs = append(coalescedLogs, logs...)
blockInsertTimer.UpdateSince(bstart)
events = append(events, ChainEvent{block, block.Hash(), logs})
@@ -1933,10 +1944,10 @@ func (bc *BlockChainImpl) insertChain(chain types.Blocks, verifyHeaders bool) (i
// insertStats tracks and reports on block insertion.
type insertStats struct {
- queued, processed, ignored int
- usedGas uint64
- lastIndex int
- startTime mclock.AbsTime
+ queued, processed int
+ usedGas uint64
+ lastIndex int
+ startTime mclock.AbsTime
}
// statsReportLimit is the time limit during import and export after which we
@@ -1975,9 +1986,6 @@ func (st *insertStats) report(chain []*types.Block, index int, cache common.Stor
if st.queued > 0 {
context = context.Int("queued", st.queued)
}
- if st.ignored > 0 {
- context = context.Int("ignored", st.ignored)
- }
logger := context.Logger()
logger.Info().Msg("Imported new chain segment")
@@ -2018,19 +2026,6 @@ func (bc *BlockChainImpl) PostChainEvents(events []interface{}, logs []*types.Lo
}
}
-func (bc *BlockChainImpl) update() {
- futureTimer := time.NewTicker(5 * time.Second)
- defer futureTimer.Stop()
- for {
- select {
- case <-futureTimer.C:
- bc.procFutureBlocks()
- case <-bc.quit:
- return
- }
- }
-}
-
// BadBlock ..
type BadBlock struct {
Block *types.Block
@@ -2100,35 +2095,6 @@ Error: %v
}
}
-// InsertHeaderChain attempts to insert the given header chain in to the local
-// chain, possibly creating a reorg. If an error is returned, it will return the
-// index number of the failing header as well an error describing what went wrong.
-//
-// The verify parameter can be used to fine tune whether nonce verification
-// should be done or not. The reason behind the optional check is because some
-// of the header retrieval mechanisms already need to verify nonces, as well as
-// because nonces can be verified sparsely, not needing to check each.
-func (bc *BlockChainImpl) InsertHeaderChain(chain []*block.Header, checkFreq int) (int, error) {
- start := time.Now()
- if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
- return i, err
- }
-
- // Make sure only one thread manipulates the chain at once
- bc.chainmu.Lock()
- defer bc.chainmu.Unlock()
-
- whFunc := func(header *block.Header) error {
- bc.mu.Lock()
- defer bc.mu.Unlock()
-
- _, err := bc.hc.WriteHeader(header)
- return err
- }
-
- return bc.hc.InsertHeaderChain(chain, whFunc, start)
-}
-
func (bc *BlockChainImpl) CurrentHeader() *block.Header {
return bc.hc.CurrentHeader()
}
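
With the `futureBlocks` cache, `procFutureBlocks`, and the background `update()` loop removed, `insertChain` now returns `ErrFutureBlock` and `ErrUnknownAncestor` to the caller instead of queueing blocks internally. A hedged, caller-side sketch of what that implies; the retry helper and sentinel below are illustrative, not code from this change:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errFutureBlock = errors.New("block in the future") // stand-in sentinel

// insertFn stands in for BlockChain.InsertChain for this illustration.
type insertFn func() (int, error)

// insertWithRetry retries a few times while the block is still in the future;
// any buffering of not-yet-insertable blocks now lives with the caller.
func insertWithRetry(insert insertFn, attempts int, wait time.Duration) error {
	for i := 0; i < attempts; i++ {
		_, err := insert()
		if err == nil || !errors.Is(err, errFutureBlock) {
			return err
		}
		time.Sleep(wait)
	}
	return errFutureBlock
}

func main() {
	calls := 0
	insert := func() (int, error) {
		calls++
		if calls < 3 {
			return 0, errFutureBlock
		}
		return 1, nil
	}
	fmt.Println(insertWithRetry(insert, 5, 10*time.Millisecond)) // <nil>
}
```
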
diff --git a/core/blockchain_leader_rotation.go b/core/blockchain_leader_rotation.go
index b7cdef5190..8b2683780b 100644
--- a/core/blockchain_leader_rotation.go
+++ b/core/blockchain_leader_rotation.go
@@ -14,10 +14,9 @@ import (
// LeaderRotationMeta contains information about leader rotation
type LeaderRotationMeta struct {
- Pub []byte // bls public key of previous block miner
- Epoch uint64 // epoch number of previously inserted block
- Count uint64 // quantity of continuous blocks inserted by the same leader
- Shifts uint64 // number of leader shifts, shift happens when leader changes
+ Pub []byte // bls public key of previous block miner
+ Epoch uint64 // epoch number of previously inserted block
+ Count uint64 // quantity of continuous blocks inserted by the same leader
}
// ShortString returns string representation of the struct
@@ -28,8 +27,6 @@ func (a LeaderRotationMeta) ShortString() string {
s.WriteString(strconv.FormatUint(a.Epoch, 10))
s.WriteString(" ")
s.WriteString(strconv.FormatUint(a.Count, 10))
- s.WriteString(" ")
- s.WriteString(strconv.FormatUint(a.Shifts, 10))
return s.String()
}
@@ -39,17 +36,15 @@ func (a LeaderRotationMeta) Hash() []byte {
c.Write(a.Pub)
c.Write([]byte(strconv.FormatUint(a.Epoch, 10)))
c.Write([]byte(strconv.FormatUint(a.Count, 10)))
- c.Write([]byte(strconv.FormatUint(a.Shifts, 10)))
return c.Sum(nil)
}
// Clone returns a copy of the struct
func (a LeaderRotationMeta) Clone() LeaderRotationMeta {
return LeaderRotationMeta{
- Pub: append([]byte{}, a.Pub...),
- Epoch: a.Epoch,
- Count: a.Count,
- Shifts: a.Shifts,
+ Pub: append([]byte{}, a.Pub...),
+ Epoch: a.Epoch,
+ Count: a.Count,
}
}
@@ -109,19 +104,10 @@ func processRotationMeta(epoch uint64, blockPubKey bls.SerializedPublicKey, s Le
} else {
s.Count = 1
}
- // we should increase shifts if the leader has changed.
- if !bytes.Equal(s.Pub, blockPubKey[:]) {
- s.Shifts++
- }
- // but set to zero if new
- if s.Epoch != epoch {
- s.Shifts = 0
- }
s.Epoch = epoch
return LeaderRotationMeta{
- Pub: blockPubKey[:],
- Epoch: s.Epoch,
- Count: s.Count,
- Shifts: s.Shifts,
+ Pub: blockPubKey[:],
+ Epoch: s.Epoch,
+ Count: s.Count,
}
}
diff --git a/core/blockchain_leader_rotation_test.go b/core/blockchain_leader_rotation_test.go
index 047dbdd636..e964d39d77 100644
--- a/core/blockchain_leader_rotation_test.go
+++ b/core/blockchain_leader_rotation_test.go
@@ -12,46 +12,27 @@ var k1 = bls.SerializedPublicKey{1, 2, 3}
func TestRotationMetaProcess(t *testing.T) {
t.Run("same_leader_increase_count", func(t *testing.T) {
rs := processRotationMeta(1, bls.SerializedPublicKey{}, LeaderRotationMeta{
- Pub: bls.SerializedPublicKey{}.Bytes(),
- Epoch: 1,
- Count: 1,
- Shifts: 1,
+ Pub: bls.SerializedPublicKey{}.Bytes(),
+ Epoch: 1,
+ Count: 1,
})
require.Equal(t, LeaderRotationMeta{
- Pub: bls.SerializedPublicKey{}.Bytes(),
- Epoch: 1,
- Count: 2,
- Shifts: 1,
- }, rs)
- })
-
- t.Run("new_leader_increase_shifts", func(t *testing.T) {
- rs := processRotationMeta(1, k1, LeaderRotationMeta{
- Pub: bls.SerializedPublicKey{}.Bytes(),
- Epoch: 1,
- Count: 1,
- Shifts: 1,
- })
- require.Equal(t, LeaderRotationMeta{
- Pub: k1.Bytes(),
- Epoch: 1,
- Count: 1,
- Shifts: 2,
+ Pub: bls.SerializedPublicKey{}.Bytes(),
+ Epoch: 1,
+ Count: 2,
}, rs)
})
t.Run("new_epoch_reset_count", func(t *testing.T) {
rs := processRotationMeta(2, k1, LeaderRotationMeta{
- Pub: bls.SerializedPublicKey{}.Bytes(),
- Epoch: 1,
- Count: 1,
- Shifts: 1,
+ Pub: bls.SerializedPublicKey{}.Bytes(),
+ Epoch: 1,
+ Count: 1,
})
require.Equal(t, LeaderRotationMeta{
- Pub: k1.Bytes(),
- Epoch: 2,
- Count: 1,
- Shifts: 0,
+ Pub: k1.Bytes(),
+ Epoch: 2,
+ Count: 1,
}, rs)
})
}
diff --git a/core/blockchain_stub.go b/core/blockchain_stub.go
index 804b48a00a..437bc32e77 100644
--- a/core/blockchain_stub.go
+++ b/core/blockchain_stub.go
@@ -8,6 +8,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/reward"
@@ -48,6 +49,10 @@ func (a Stub) CurrentBlock() *types.Block {
return nil
}
+func (a Stub) CurrentFastBlock() *types.Block {
+ return nil
+}
+
func (a Stub) Validator() Validator {
return nil
}
@@ -64,6 +69,14 @@ func (a Stub) StateAt(common.Hash) (*state.DB, error) {
return nil, errors.Errorf("method StateAt not implemented for %s", a.Name)
}
+func (a Stub) Snapshots() *snapshot.Tree {
+ return nil
+}
+
+func (a Stub) TrieDB() *trie.Database {
+ return nil
+}
+
func (a Stub) TrieNode(hash common.Hash) ([]byte, error) {
return []byte{}, errors.Errorf("method TrieNode not implemented for %s", a.Name)
}
@@ -111,7 +124,7 @@ func (a Stub) Rollback(chain []common.Hash) error {
return errors.Errorf("method Rollback not implemented for %s", a.Name)
}
-func (a Stub) WriteBlockWithoutState(block *types.Block, td *big.Int) (err error) {
+func (a Stub) WriteBlockWithoutState(block *types.Block) (err error) {
return errors.Errorf("method WriteBlockWithoutState not implemented for %s", a.Name)
}
@@ -127,6 +140,10 @@ func (a Stub) InsertChain(chain types.Blocks, verifyHeaders bool) (int, error) {
return 0, errors.Errorf("method InsertChain not implemented for %s", a.Name)
}
+func (a Stub) InsertReceiptChain(blockChain types.Blocks, receiptChain []types.Receipts) (int, error) {
+ return 0, errors.Errorf("method InsertReceiptChain not implemented for %s", a.Name)
+}
+
func (a Stub) BadBlocks() []BadBlock {
return nil
}
diff --git a/core/epochchain.go b/core/epochchain.go
index 7a3c40677b..2dab284713 100644
--- a/core/epochchain.go
+++ b/core/epochchain.go
@@ -124,7 +124,7 @@ func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) {
}()
for i, block := range blocks {
if !block.IsLastBlockInEpoch() {
- return i, errors.New("block is not last block in epoch")
+ return i, ErrNotLastBlockInEpoch
}
sig, bitmap, err := chain.ParseCommitSigAndBitmap(block.GetCurrentCommitSig())
if err != nil {
@@ -166,7 +166,8 @@ func (bc *EpochChain) InsertChain(blocks types.Blocks, _ bool) (int, error) {
se1()
se2()
utils.Logger().Info().
- Msgf("[EPOCHSYNC] Added block %d %s", block.NumberU64(), block.Hash().Hex())
+ Msgf("[EPOCHSYNC] Added block %d, epoch %d, %s", block.NumberU64(), block.Epoch().Uint64(), block.Hash().Hex())
+
}
return 0, nil
}
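
Promoting the inline error to the exported `ErrNotLastBlockInEpoch` sentinel lets callers of `EpochChain.InsertChain` branch on it with `errors.Is` instead of matching the message text. A minimal sketch with stand-in declarations:

```go
package main

import (
	"errors"
	"fmt"
)

// ErrNotLastBlockInEpoch stands in for the sentinel now exported from core.
var ErrNotLastBlockInEpoch = errors.New("not last block in epoch")

// insertChain mimics the epoch chain's check on incoming blocks.
func insertChain(lastInEpoch bool) (int, error) {
	if !lastInEpoch {
		return 0, ErrNotLastBlockInEpoch
	}
	return 1, nil
}

func main() {
	if _, err := insertChain(false); errors.Is(err, ErrNotLastBlockInEpoch) {
		fmt.Println("skip: wait for the epoch's last block")
	}
}
```
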
diff --git a/core/headerchain.go b/core/headerchain.go
index a902d5a124..4f5e8a066c 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -18,13 +18,11 @@ package core
import (
crand "crypto/rand"
- "errors"
"fmt"
"math"
"math/big"
mrand "math/rand"
"sync/atomic"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
@@ -260,55 +258,6 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*block.Header, checkFreq int)
return 0, nil
}
-// InsertHeaderChain attempts to insert the given header chain in to the local
-// chain, possibly creating a reorg. If an error is returned, it will return the
-// index number of the failing header as well an error describing what went wrong.
-//
-// The verify parameter can be used to fine tune whether nonce verification
-// should be done or not. The reason behind the optional check is because some
-// of the header retrieval mechanisms already need to verfy nonces, as well as
-// because nonces can be verified sparsely, not needing to check each.
-func (hc *HeaderChain) InsertHeaderChain(chain []*block.Header, writeHeader WhCallback, start time.Time) (int, error) {
- // Collect some import statistics to report on
- stats := struct{ processed, ignored int }{}
- // All headers passed verification, import them into the database
- for i, header := range chain {
- // Short circuit insertion if shutting down
- if hc.procInterrupt() {
- utils.Logger().Debug().Msg("Premature abort during headers import")
- return i, errors.New("aborted")
- }
- // If the header's already known, skip it, otherwise store
- if hc.HasHeader(header.Hash(), header.Number().Uint64()) {
- stats.ignored++
- continue
- }
- if err := writeHeader(header); err != nil {
- return i, err
- }
- stats.processed++
- }
- // Report some public statistics so the user has a clue what's going on
- last := chain[len(chain)-1]
-
- context := utils.Logger().With().
- Int("count", stats.processed).
- Str("elapsed", common.PrettyDuration(time.Since(start)).String()).
- Str("number", last.Number().String()).
- Str("hash", last.Hash().Hex())
-
- if timestamp := time.Unix(last.Time().Int64(), 0); time.Since(timestamp) > time.Minute {
- context = context.Str("age", common.PrettyAge(timestamp).String())
- }
- if stats.ignored > 0 {
- context = context.Int("ignored", stats.ignored)
- }
- logger := context.Logger()
- logger.Info().Msg("Imported new block headers")
-
- return 0, nil
-}
-
// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 72ce358e29..b01dc09655 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -597,14 +597,17 @@ func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
}
// WriteLastPivotNumber stores the number of the last pivot block.
-func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
+func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) error {
enc, err := rlp.EncodeToBytes(pivot)
if err != nil {
utils.Logger().Error().Err(err).Msg("Failed to encode pivot block number")
+ return err
}
if err := db.Put(lastPivotKey, enc); err != nil {
utils.Logger().Error().Err(err).Msg("Failed to store pivot block number")
+ return err
}
+ return nil
}
// ReadTxIndexTail retrieves the number of oldest indexed block
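
`WriteLastPivotNumber` now returns the encode/put error instead of only logging it, so callers can abort a pivot update cleanly. A minimal caller-side sketch, with a stand-in for the rawdb function:

```go
package main

import (
	"errors"
	"fmt"
)

// writeLastPivotNumber stands in for rawdb.WriteLastPivotNumber's new signature.
func writeLastPivotNumber(pivot uint64) error {
	if pivot == 0 {
		return errors.New("refusing to store zero pivot") // illustrative failure
	}
	return nil
}

func main() {
	if err := writeLastPivotNumber(0); err != nil {
		fmt.Println("pivot not persisted:", err) // the caller can now react instead of silently continuing
	}
}
```
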
diff --git a/core/rawdb/accessors_offchain.go b/core/rawdb/accessors_offchain.go
index dd43299034..05a2321a26 100644
--- a/core/rawdb/accessors_offchain.go
+++ b/core/rawdb/accessors_offchain.go
@@ -22,7 +22,7 @@ func ReadShardState(
data, err := db.Get(shardStateKey(epoch))
if err != nil {
return nil, errors.Errorf(
- MsgNoShardStateFromDB, "epoch: %d", epoch,
+ MsgNoShardStateFromDB, "epoch: %d", epoch.Uint64(),
)
}
ss, err2 := shard.DecodeWrapper(data)
@@ -43,7 +43,7 @@ func WriteShardStateBytes(db DatabaseWriter, epoch *big.Int, data []byte) error
}
utils.Logger().Info().
Str("epoch", epoch.String()).
- Int("size", len(data)).Msg("wrote sharding state")
+ Int("size", len(data)).Msgf("wrote sharding state, epoch %d", epoch.Uint64())
return nil
}
diff --git a/core/state_processor.go b/core/state_processor.go
index 2141fe0cf1..fb7290c077 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -22,8 +22,6 @@ import (
"math/big"
"time"
- lru "github.com/hashicorp/golang-lru"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
@@ -40,6 +38,7 @@ import (
"github.com/harmony-one/harmony/staking/effective"
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
+ lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
)
@@ -310,17 +309,15 @@ func ApplyTransaction(bc ChainContext, author *common.Address, gp *GasPool, stat
// Apply the transaction to the current state (included in the env)
result, err := ApplyMessage(vmenv, msg, gp)
if err != nil {
- if err != nil {
- to := ""
- if m := msg.To(); m != nil {
- to = m.Hex()
- }
- balance := ""
- if a := statedb.GetBalance(msg.From()); a != nil {
- balance = a.String()
- }
- return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), to, balance)
+ to := ""
+ if m := msg.To(); m != nil {
+ to = m.Hex()
+ }
+ balance := ""
+ if a := statedb.GetBalance(msg.From()); a != nil {
+ balance = a.String()
}
+ return nil, nil, nil, 0, errors.Wrapf(err, "apply failed from='%s' to='%s' balance='%s'", msg.From().Hex(), to, balance)
}
// Update the state with pending changes
var root []byte
diff --git a/go.mod b/go.mod
index 8644ba7bd4..ac5fecc538 100644
--- a/go.mod
+++ b/go.mod
@@ -14,7 +14,7 @@ require (
github.com/coinbase/rosetta-sdk-go v0.7.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v1.8.0
- github.com/ethereum/go-ethereum v1.11.2
+ github.com/ethereum/go-ethereum v1.13.4
github.com/go-redis/redis/v8 v8.11.5
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.3
@@ -38,7 +38,7 @@ require (
github.com/pborman/uuid v1.2.0
github.com/pelletier/go-toml v1.9.5
github.com/pkg/errors v0.9.1
- github.com/prometheus/client_golang v1.14.0
+ github.com/prometheus/client_golang v1.17.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rjeczalik/notify v0.9.2
github.com/rs/cors v1.7.0
@@ -52,14 +52,14 @@ require (
github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee
go.uber.org/ratelimit v0.1.0
go.uber.org/zap v1.24.0
- golang.org/x/crypto v0.9.0
- golang.org/x/net v0.10.0 // indirect
- golang.org/x/sync v0.2.0
- golang.org/x/sys v0.8.0 // indirect
+ golang.org/x/crypto v0.14.0
+ golang.org/x/net v0.17.0 // indirect
+ golang.org/x/sync v0.4.0
+ golang.org/x/sys v0.13.0 // indirect
golang.org/x/time v0.3.0
- golang.org/x/tools v0.9.3 // indirect
+ golang.org/x/tools v0.14.0 // indirect
google.golang.org/grpc v1.55.0
- google.golang.org/protobuf v1.30.0
+ google.golang.org/protobuf v1.31.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6
@@ -68,35 +68,37 @@ require (
require (
github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b
+ github.com/grafana/pyroscope-go v1.0.4
github.com/holiman/bloomfilter/v2 v2.0.3
github.com/ledgerwatch/erigon-lib v0.0.0-20230607152933-42c9c28cac68
github.com/ledgerwatch/log/v3 v3.8.0
github.com/olekukonko/tablewriter v0.0.5
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d
)
require (
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
- github.com/BurntSushi/toml v1.2.0 // indirect
- github.com/DataDog/zstd v1.5.2 // indirect
+ github.com/BurntSushi/toml v1.3.2 // indirect
+ github.com/DataDog/zstd v1.5.5 // indirect
github.com/OpenPeeDeeP/depguard v1.0.1 // indirect
github.com/VictoriaMetrics/metrics v1.23.1 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bits-and-blooms/bitset v1.5.0 // indirect
+ github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/bombsimon/wsl/v2 v2.0.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
- github.com/cockroachdb/errors v1.9.1 // indirect
+ github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 // indirect
- github.com/cockroachdb/redact v1.1.3 // indirect
+ github.com/cockroachdb/redact v1.1.5 // indirect
github.com/containerd/cgroups v1.1.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
- github.com/deckarep/golang-set/v2 v2.3.0 // indirect
- github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect
+ github.com/deckarep/golang-set/v2 v2.3.1 // indirect
+ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/dgraph-io/badger v1.6.2 // indirect
github.com/dgraph-io/ristretto v0.0.3 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
@@ -109,10 +111,10 @@ require (
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
- github.com/getsentry/sentry-go v0.18.0 // indirect
+ github.com/getsentry/sentry-go v0.25.0 // indirect
github.com/go-critic/go-critic v0.4.0 // indirect
github.com/go-lintpack/lintpack v0.5.2 // indirect
- github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-toolsmith/astcast v1.0.0 // indirect
@@ -126,7 +128,7 @@ require (
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/snappy v0.0.4 // indirect
+ github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 // indirect
@@ -146,14 +148,15 @@ require (
github.com/google/pprof v0.0.0-20230405160723-4a4c7d95572b // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 // indirect
+ github.com/grafana/pyroscope-go/godeltaprof v0.1.4 // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect
- github.com/holiman/uint256 v1.2.2 // indirect
- github.com/huin/goupnp v1.1.0 // indirect
+ github.com/holiman/uint256 v1.2.3 // indirect
+ github.com/huin/goupnp v1.3.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/ipfs/go-cid v0.4.1 // indirect
github.com/ipfs/go-datastore v0.6.0 // indirect
@@ -167,7 +170,7 @@ require (
github.com/jbenet/goprocess v0.1.4 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/kisielk/gotool v1.0.0 // indirect
- github.com/klauspost/compress v1.16.4 // indirect
+ github.com/klauspost/compress v1.17.1 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/pretty v0.3.1 // indirect
@@ -188,7 +191,7 @@ require (
github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
- github.com/mattn/go-runewidth v0.0.14 // indirect
+ github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/dns v1.1.53 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
@@ -220,10 +223,10 @@ require (
github.com/pingcap/log v0.0.0-20211215031037-e024ba4eb0ee // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
- github.com/prometheus/common v0.42.0 // indirect
- github.com/prometheus/procfs v0.9.0 // indirect
- github.com/prometheus/tsdb v0.10.0 // indirect
+ github.com/prometheus/client_model v0.5.0 // indirect
+ github.com/prometheus/common v0.44.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/prometheus/tsdb v0.7.1 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-19 v0.3.3 // indirect
github.com/quic-go/qtls-go1-20 v0.2.3 // indirect
@@ -231,7 +234,7 @@ require (
github.com/quic-go/webtransport-go v0.5.2 // indirect
github.com/raulk/go-watchdog v1.3.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
- github.com/rogpeppe/go-internal v1.9.0 // indirect
+ github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
@@ -245,8 +248,8 @@ require (
github.com/subosito/gotenv v1.4.1 // indirect
github.com/tikv/pd/client v0.0.0-20220216070739-26c668271201 // indirect
github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e // indirect
- github.com/tklauser/go-sysconf v0.3.11 // indirect
- github.com/tklauser/numcpus v0.6.0 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
github.com/tommy-muehle/go-mnd v1.1.1 // indirect
github.com/torquem-ch/mdbx-go v0.27.10 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
@@ -256,16 +259,15 @@ require (
github.com/valyala/fastrand v1.1.0 // indirect
github.com/valyala/histogram v1.2.0 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
- github.com/yusufpapurcu/wmi v1.2.2 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/dig v1.16.1 // indirect
go.uber.org/fx v1.19.2 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
- golang.org/x/mod v0.10.0 // indirect
- golang.org/x/term v0.8.0 // indirect
- golang.org/x/text v0.9.0 // indirect
+ golang.org/x/mod v0.13.0 // indirect
+ golang.org/x/term v0.13.0 // indirect
+ golang.org/x/text v0.13.0 // indirect
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
diff --git a/go.sum b/go.sum
index d5200066de..4f620c9014 100644
--- a/go.sum
+++ b/go.sum
@@ -60,8 +60,9 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSu
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
+github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw=
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
@@ -71,8 +72,9 @@ github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
-github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
+github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
+github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY=
github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM=
@@ -138,8 +140,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
-github.com/bits-and-blooms/bitset v1.5.0 h1:NpE8frKRLGHIcEzkR+gZhiioW1+WbYV6fKwD6ZIpQT8=
-github.com/bits-and-blooms/bitset v1.5.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
+github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
@@ -204,8 +206,9 @@ github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD9
github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM=
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
-github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8=
github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk=
+github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8=
+github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
@@ -214,8 +217,9 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lg
github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3 h1:S4re5MXHfznkOlgkgUfh9ptgaG2esdH95IuJWwP0fM0=
github.com/cockroachdb/pebble v0.0.0-20230302152029-717cbce0c2e3/go.mod h1:9lRMC4XN3/BLPtIp6kAKwIaHu369NOf2rMucPzipz50=
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
-github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
+github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
+github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
github.com/coinbase/rosetta-sdk-go v0.7.0 h1:lmTO/JEpCvZgpbkOITL95rA80CPKb5CtMzLaqF2mCNg=
@@ -258,13 +262,13 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6Uh
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
-github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g=
-github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
-github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
+github.com/deckarep/golang-set/v2 v2.3.1 h1:vjmkvJt/IV27WXPyYQpAh4bRyWJc5Y435D17XQ9QU5A=
+github.com/deckarep/golang-set/v2 v2.3.1/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
+github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4=
-github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
+github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
@@ -352,8 +356,9 @@ github.com/gballet/go-verkle v0.0.0-20220902153445-097bd83b7732/go.mod h1:o/XfIX
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c=
-github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0=
github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ=
+github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI=
+github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
@@ -393,8 +398,9 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@@ -518,8 +524,9 @@ github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8l
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
+github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@@ -625,6 +632,10 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw=
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/grafana/pyroscope-go v1.0.4 h1:oyQX0BOkL+iARXzHuCdIF5TQ7/sRSel1YFViMHC7Bm0=
+github.com/grafana/pyroscope-go v1.0.4/go.mod h1:0d7ftwSMBV/Awm7CCiYmHQEG8Y44Ma3YSjt+nWcWztY=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.4 h1:mDsJ3ngul7UfrHibGQpV66PbZ3q1T8glz/tK3bQKKEk=
+github.com/grafana/pyroscope-go/godeltaprof v0.1.4/go.mod h1:1HSPtjU8vLG0jE9JrTdzjgFqdJ/VgN7fvxBNq3luJko=
github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -682,13 +693,13 @@ github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
-github.com/holiman/uint256 v1.2.2 h1:TXKcSGc2WaxPD2+bmzAsVthL4+pEN0YwXcL5qED83vk=
-github.com/holiman/uint256 v1.2.2/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
+github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
+github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y=
-github.com/huin/goupnp v1.1.0 h1:gEe0Dp/lZmPZiDFzJJaOfUpOvv2MKUkoBX8lDrn9vKU=
-github.com/huin/goupnp v1.1.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
+github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
+github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE=
@@ -816,8 +827,8 @@ github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
-github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
-github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g=
+github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -939,8 +950,8 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
@@ -1147,15 +1158,17 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -1166,8 +1179,8 @@ github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -1176,11 +1189,11 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=
-github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4=
github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
@@ -1209,8 +1222,9 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
@@ -1363,12 +1377,12 @@ github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiff
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
-github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
-github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
-github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
-github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tommy-muehle/go-mnd v1.1.1 h1:4D0wuPKjOTiK2garzuPGGvm4zZ/wLYDOH8TJSABC7KU=
github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
@@ -1445,8 +1459,9 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
@@ -1546,8 +1561,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
-golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1564,8 +1579,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk
golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
-golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1597,8 +1612,8 @@ golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20211013180041-c96bc1413d57/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1673,8 +1688,8 @@ golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfS
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1707,8 +1722,8 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
+golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1826,15 +1841,17 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1847,8 +1864,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1946,8 +1963,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
+golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2093,8 +2110,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
-google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/hmy/downloader/adapter_test.go b/hmy/downloader/adapter_test.go
index 692ed8ad77..4bc023b5cc 100644
--- a/hmy/downloader/adapter_test.go
+++ b/hmy/downloader/adapter_test.go
@@ -8,11 +8,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/consensus/reward"
"github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/core/state/snapshot"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/internal/params"
@@ -88,7 +90,9 @@ func (bc *testBlockChain) changeBlockNumber(val uint64) {
func (bc *testBlockChain) ShardID() uint32 { return 0 }
func (bc *testBlockChain) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil }
+func (bc *testBlockChain) Snapshots() *snapshot.Tree { return nil }
func (bc *testBlockChain) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
+func (bc *testBlockChain) TrieDB() *trie.Database { return nil }
func (bc *testBlockChain) Config() *params.ChainConfig { return nil }
func (bc *testBlockChain) WriteCommitSig(blockNum uint64, lastCommits []byte) error { return nil }
func (bc *testBlockChain) GetHeader(hash common.Hash, number uint64) *block.Header { return nil }
diff --git a/hmy/hmy.go b/hmy/hmy.go
index 24f0caa127..097e597d02 100644
--- a/hmy/hmy.go
+++ b/hmy/hmy.go
@@ -120,6 +120,7 @@ type NodeAPI interface {
GetConfig() commonRPC.Config
ShutDown()
GetLastSigningPower() (float64, error)
+ GetLastSigningPower2() (float64, error)
}
// New creates a new Harmony object (including the
diff --git a/internal/chain/engine.go b/internal/chain/engine.go
index 8e07f7a1e4..d0e8e02db7 100644
--- a/internal/chain/engine.go
+++ b/internal/chain/engine.go
@@ -456,6 +456,13 @@ func setElectionEpochAndMinFee(chain engine.ChainReader, header *block.Header, s
isElected[addr] = struct{}{}
}
+ if config.IsMaxRate(newShardState.Epoch) {
+ for _, addr := range chain.ValidatorCandidates() {
+ if _, err := availability.UpdateMaxCommissionFee(state, addr, minRate); err != nil {
+ return err
+ }
+ }
+ }
// due to a bug in the old implementation of the minimum fee,
// unelected validators did not have their fee updated even
// when the protocol required them to do so. here we fix it,
diff --git a/internal/chain/engine_test.go b/internal/chain/engine_test.go
index 7654d9d6cd..530cbdc019 100644
--- a/internal/chain/engine_test.go
+++ b/internal/chain/engine_test.go
@@ -5,6 +5,7 @@ import (
"math/big"
"testing"
+ "github.com/ethereum/go-ethereum/trie"
bls_core "github.com/harmony-one/bls/ffi/go/bls"
"github.com/harmony-one/harmony/block"
blockfactory "github.com/harmony-one/harmony/block/factory"
@@ -21,6 +22,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/harmony-one/harmony/core/rawdb"
"github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/core/state/snapshot"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/internal/params"
)
@@ -328,6 +330,7 @@ func (bc *fakeBlockChain) ContractCode(hash common.Hash) ([]byte, error)
func (bc *fakeBlockChain) ValidatorCode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
func (bc *fakeBlockChain) ShardID() uint32 { return 0 }
func (bc *fakeBlockChain) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil }
+func (bc *fakeBlockChain) TrieDB() *trie.Database { return nil }
func (bc *fakeBlockChain) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
func (bc *fakeBlockChain) WriteCommitSig(blockNum uint64, lastCommits []byte) error { return nil }
func (bc *fakeBlockChain) GetHeaderByNumber(number uint64) *block.Header { return nil }
@@ -353,6 +356,9 @@ func (bc *fakeBlockChain) Config() *params.ChainConfig {
func (cr *fakeBlockChain) StateAt(root common.Hash) (*state.DB, error) {
return nil, nil
}
+func (cr *fakeBlockChain) Snapshots() *snapshot.Tree {
+ return nil
+}
func (bc *fakeBlockChain) ReadValidatorSnapshot(addr common.Address) (*staking.ValidatorSnapshot, error) {
return nil, nil
}
diff --git a/internal/configs/harmony/harmony.go b/internal/configs/harmony/harmony.go
index 2fcb200c42..276c90d05a 100644
--- a/internal/configs/harmony/harmony.go
+++ b/internal/configs/harmony/harmony.go
@@ -37,6 +37,7 @@ type HarmonyConfig struct {
ShardData ShardDataConfig
GPO GasPriceOracleConfig
Preimage *PreimageConfig
+ Cache CacheConfig
}
func (hc HarmonyConfig) ToRPCServerConfig() nodeconfig.RPCServerConfig {
@@ -138,7 +139,6 @@ type GeneralConfig struct {
TraceEnable bool
EnablePruneBeaconChain bool
RunElasticMode bool
- TriesInMemory int
}
type TiKVConfig struct {
@@ -306,6 +306,17 @@ type RevertConfig struct {
RevertBefore int
}
+type CacheConfig struct {
+ Disabled bool // Whether to disable trie write caching (archive node)
+ TrieNodeLimit int // Memory limit (MB) at which to flush the current in-memory trie to disk
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+ TriesInMemory uint64 // Number of recent blocks from the head whose tries are kept in memory
+ Preimages bool // Whether to store preimage of trie key to the disk
+ SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
+ SnapshotNoBuild bool // Whether to disable background snapshot generation
+ SnapshotWait bool // Wait for snapshot construction on startup
+}
+
type PreimageConfig struct {
ImportFrom string
ExportTo string
@@ -329,6 +340,7 @@ type PrometheusConfig struct {
type SyncConfig struct {
// TODO: Remove this bool after stream sync is fully up.
Enabled bool // enable the stream sync protocol
+ SyncMode uint32 // sync mode (0: Full Sync (default), 1: Fast Sync, 2: Snap Sync, not yet implemented)
Downloader bool // start the sync downloader client
StagedSync bool // use staged sync
StagedSyncCfg StagedSyncConfig // staged sync configurations
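For reference, a minimal sketch (not part of this diff) of populating the new `Cache` section with the defaults that `shardchains.go` previously hard-coded (see the hunk for `internal/shardchain/shardchains.go` further below); the snapshot-related values here are illustrative assumptions only.

```go
package main

import (
	"time"

	harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
)

// defaultCache mirrors the 256 MB / 2 min / 128-trie defaults removed from
// shardchains.go in this diff; SnapshotLimit and SnapshotWait are assumed values.
func defaultCache() harmonyconfig.CacheConfig {
	return harmonyconfig.CacheConfig{
		Disabled:      false,           // keep trie write caching on (non-archive node)
		TrieNodeLimit: 256,             // MB of in-memory trie nodes before flushing
		TrieTimeLimit: 2 * time.Minute, // flush the in-memory trie at least this often
		TriesInMemory: 128,             // recent tries kept in memory
		Preimages:     true,            // persist trie-key preimages
		SnapshotLimit: 256,             // MB for snapshot entries (assumption)
		SnapshotWait:  true,            // wait for snapshot construction on startup (assumption)
	}
}
```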
diff --git a/internal/configs/sharding/partner.go b/internal/configs/sharding/partner.go
index 99ea96141b..bbf5dccb4a 100644
--- a/internal/configs/sharding/partner.go
+++ b/internal/configs/sharding/partner.go
@@ -40,6 +40,8 @@ const (
func (ps partnerSchedule) InstanceForEpoch(epoch *big.Int) Instance {
switch {
+ case params.PartnerChainConfig.IsDevnetExternalEpoch(epoch):
+ return partnerV3
case params.PartnerChainConfig.IsHIP30(epoch):
return partnerV2
case epoch.Cmp(params.PartnerChainConfig.StakingEpoch) >= 0:
@@ -97,17 +99,25 @@ var partnerV0 = MustNewInstance(
partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch(),
)
var partnerV1 = MustNewInstance(
- 2, 5, 4, 0,
+ 2, 15, 4, 0,
numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts,
genesis.TNFoundationalAccounts, emptyAllowlist, nil,
numeric.ZeroDec(), ethCommon.Address{},
partnerReshardingEpoch, PartnerSchedule.BlocksPerEpoch(),
)
var partnerV2 = MustNewInstance(
- 2, 5, 4, 0,
+ 2, 20, 4, 0,
numeric.MustNewDecFromStr("0.9"), genesis.TNHarmonyAccounts,
genesis.TNFoundationalAccounts, emptyAllowlist,
feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"),
hip30CollectionAddressTestnet, partnerReshardingEpoch,
PartnerSchedule.BlocksPerEpoch(),
)
+var partnerV3 = MustNewInstance(
+ 2, 20, 0, 0,
+ numeric.MustNewDecFromStr("0.0"), genesis.TNHarmonyAccounts,
+ genesis.TNFoundationalAccounts, emptyAllowlist,
+ feeCollectorsDevnet[1], numeric.MustNewDecFromStr("0.25"),
+ hip30CollectionAddressTestnet, partnerReshardingEpoch,
+ PartnerSchedule.BlocksPerEpoch(),
+)
diff --git a/internal/params/config.go b/internal/params/config.go
index c96bf8ab9b..b0c6c70d11 100644
--- a/internal/params/config.go
+++ b/internal/params/config.go
@@ -75,7 +75,8 @@ var (
ValidatorCodeFixEpoch: big.NewInt(1535), // 2023-07-20 05:51:07+00:00
HIP30Epoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00
BlockGas30MEpoch: big.NewInt(1673), // 2023-11-02 17:30:00+00:00
- MaxRateEpoch: big.NewInt(1733), // 2023-12-17 12:20:15+00:00
+ MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: EpochTBD,
}
// TestnetChainConfig contains the chain parameters to run a node on the harmony test network.
@@ -119,7 +120,8 @@ var (
ValidatorCodeFixEpoch: big.NewInt(1296), // 2023-04-28 07:14:20+00:00
HIP30Epoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00
BlockGas30MEpoch: big.NewInt(2176), // 2023-10-12 10:00:00+00:00
- MaxRateEpoch: big.NewInt(2520), // 2023-12-16 12:17:14+00:00
+ MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: EpochTBD,
}
// PangaeaChainConfig contains the chain parameters for the Pangaea network.
// All features except for CrossLink are enabled at launch.
@@ -164,6 +166,7 @@ var (
HIP30Epoch: EpochTBD,
BlockGas30MEpoch: big.NewInt(0),
MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: EpochTBD,
}
// PartnerChainConfig contains the chain parameters for the Partner network.
@@ -202,13 +205,14 @@ var (
SlotsLimitedEpoch: EpochTBD, // epoch to enable HIP-16
CrossShardXferPrecompileEpoch: big.NewInt(5),
AllowlistEpoch: EpochTBD,
- LeaderRotationInternalValidatorsEpoch: EpochTBD,
- LeaderRotationExternalValidatorsEpoch: EpochTBD,
+ LeaderRotationInternalValidatorsEpoch: big.NewInt(144),
+ LeaderRotationExternalValidatorsEpoch: big.NewInt(144),
FeeCollectEpoch: big.NewInt(5),
ValidatorCodeFixEpoch: big.NewInt(5),
HIP30Epoch: big.NewInt(7),
BlockGas30MEpoch: big.NewInt(7),
MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: big.NewInt(144),
}
// StressnetChainConfig contains the chain parameters for the Stress test network.
@@ -254,6 +258,7 @@ var (
HIP30Epoch: EpochTBD,
BlockGas30MEpoch: big.NewInt(0),
MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: EpochTBD,
}
// LocalnetChainConfig contains the chain parameters to run for local development.
@@ -298,6 +303,7 @@ var (
HIP30Epoch: EpochTBD,
BlockGas30MEpoch: big.NewInt(0),
MaxRateEpoch: EpochTBD,
+ DevnetExternalEpoch: EpochTBD,
}
// AllProtocolChanges ...
@@ -344,6 +350,7 @@ var (
big.NewInt(0), // BlockGas30M
big.NewInt(0), // BlockGas30M
big.NewInt(0), // MaxRateEpoch
+ big.NewInt(0), // DevnetExternalEpoch
}
// TestChainConfig ...
@@ -390,6 +397,7 @@ var (
big.NewInt(0), // HIP30Epoch
big.NewInt(0), // BlockGas30M
big.NewInt(0), // MaxRateEpoch
+ big.NewInt(0), // DevnetExternalEpoch
}
// TestRules ...
@@ -554,6 +562,8 @@ type ChainConfig struct {
// 4. Change the minimum validator commission from 5 to 7% (all nets)
HIP30Epoch *big.Int `json:"hip30-epoch,omitempty"`
+ DevnetExternalEpoch *big.Int `json:"devnet-external-epoch,omitempty"`
+
BlockGas30MEpoch *big.Int `json:"block-gas-30m-epoch,omitempty"`
// MaxRateEpoch will make sure the validator max-rate is at least equal to the minRate + the validator max-rate-increase
@@ -817,6 +827,10 @@ func (c *ChainConfig) IsHIP30(epoch *big.Int) bool {
return isForked(c.HIP30Epoch, epoch)
}
+func (c *ChainConfig) IsDevnetExternalEpoch(epoch *big.Int) bool {
+ return isForked(c.DevnetExternalEpoch, epoch)
+}
+
func (c *ChainConfig) IsMaxRate(epoch *big.Int) bool {
return isForked(c.MaxRateEpoch, epoch)
}
diff --git a/internal/shardchain/shardchains.go b/internal/shardchain/shardchains.go
index 5da1b9186f..6a9e9230ac 100644
--- a/internal/shardchain/shardchains.go
+++ b/internal/shardchain/shardchains.go
@@ -3,7 +3,6 @@ package shardchain
import (
"math/big"
"sync"
- "time"
"github.com/harmony-one/harmony/core/state"
harmonyconfig "github.com/harmony-one/harmony/internal/configs/harmony"
@@ -110,14 +109,19 @@ func (sc *CollectionImpl) ShardChain(shardID uint32, options ...core.Options) (c
Uint32("shardID", shardID).
Msg("disable cache, running in archival mode")
} else {
- cacheConfig = &core.CacheConfig{
- TrieNodeLimit: 256,
- TrieTimeLimit: 2 * time.Minute,
- TriesInMemory: 128,
- Preimages: true,
- }
- if sc.harmonyconfig != nil {
- cacheConfig.TriesInMemory = uint64(sc.harmonyconfig.General.TriesInMemory)
+ hc := sc.harmonyconfig
+ if hc != nil {
+ cacheConfig = &core.CacheConfig{
+ Disabled: hc.Cache.Disabled,
+ TrieNodeLimit: hc.Cache.TrieNodeLimit,
+ TrieTimeLimit: hc.Cache.TrieTimeLimit,
+ TriesInMemory: hc.Cache.TriesInMemory,
+ SnapshotLimit: hc.Cache.SnapshotLimit,
+ SnapshotWait: hc.Cache.SnapshotWait,
+ Preimages: hc.Cache.Preimages,
+ }
+ } else {
+ cacheConfig = nil
}
}
diff --git a/internal/utils/blockedpeers/manager.go b/internal/utils/blockedpeers/manager.go
new file mode 100644
index 0000000000..a56fe29e6f
--- /dev/null
+++ b/internal/utils/blockedpeers/manager.go
@@ -0,0 +1,43 @@
+package blockedpeers
+
+import (
+ "time"
+
+ "github.com/harmony-one/harmony/internal/utils/lrucache"
+ "github.com/libp2p/go-libp2p/core/peer"
+)
+
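+// Manager keeps a bounded, LRU-backed record of temporarily banned peers,
+// mapping each peer ID to the time at which its ban expires.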
+type Manager struct {
+ internal *lrucache.Cache[peer.ID, time.Time]
+}
+
+func NewManager(size int) *Manager {
+ return &Manager{
+ internal: lrucache.NewCache[peer.ID, time.Time](size),
+ }
+}
+
+func (m *Manager) IsBanned(key peer.ID, now time.Time) bool {
+ future, ok := m.internal.Get(key)
+
+ if ok {
+ return future.After(now) // future > now
+ }
+ return ok
+}
+
+func (m *Manager) Ban(key peer.ID, future time.Time) {
+ m.internal.Set(key, future)
+}
+
+func (m *Manager) Contains(key peer.ID) bool {
+ return m.internal.Contains(key)
+}
+
+func (m *Manager) Len() int {
+ return m.internal.Len()
+}
+
+func (m *Manager) Keys() []peer.ID {
+ return m.internal.Keys()
+}
diff --git a/internal/utils/blockedpeers/manager_test.go b/internal/utils/blockedpeers/manager_test.go
new file mode 100644
index 0000000000..b1bccb54ab
--- /dev/null
+++ b/internal/utils/blockedpeers/manager_test.go
@@ -0,0 +1,27 @@
+package blockedpeers
+
+import (
+ "testing"
+ "time"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewManager(t *testing.T) {
+ var (
+ peer1 peer.ID = "peer1"
+ now = time.Now()
+ m = NewManager(4)
+ )
+
+ t.Run("check_empty", func(t *testing.T) {
+ require.False(t, m.IsBanned(peer1, now), "peer1 should not be banned")
+ })
+ t.Run("ban_peer1", func(t *testing.T) {
+ m.Ban(peer1, now.Add(2*time.Second))
+ require.True(t, m.IsBanned(peer1, now), "peer1 should be banned")
+ require.False(t, m.IsBanned(peer1, now.Add(3*time.Second)), "peer1 should not be banned after 3 seconds")
+ })
+
+}
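A quick usage sketch of the new `blockedpeers` package (illustrative only; the peer ID and ban duration below are made up):

```go
package main

import (
	"fmt"
	"time"

	"github.com/harmony-one/harmony/internal/utils/blockedpeers"
	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	m := blockedpeers.NewManager(1024)
	var pid peer.ID = "QmExamplePeer" // hypothetical peer ID

	m.Ban(pid, time.Now().Add(10*time.Minute))                   // ban for ten minutes
	fmt.Println(m.IsBanned(pid, time.Now()))                     // true while the expiry is in the future
	fmt.Println(m.IsBanned(pid, time.Now().Add(11*time.Minute))) // false once the ban has expired
}
```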
diff --git a/internal/utils/lrucache/lrucache.go b/internal/utils/lrucache/lrucache.go
index 4859811b51..e20424a108 100644
--- a/internal/utils/lrucache/lrucache.go
+++ b/internal/utils/lrucache/lrucache.go
@@ -25,3 +25,21 @@ func (c *Cache[K, V]) Get(key K) (V, bool) {
func (c *Cache[K, V]) Set(key K, value V) {
c.cache.Add(key, value)
}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *Cache[K, V]) Contains(key K) bool {
+ return c.cache.Contains(key)
+}
+
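+// Len returns the number of entries currently held in the cache.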
+func (c *Cache[K, V]) Len() int {
+ return c.cache.Len()
+}
+
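+// Keys returns a snapshot of the keys currently held in the cache.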
+func (c *Cache[K, V]) Keys() []K {
+ out := make([]K, 0, c.cache.Len())
+ for _, v := range c.cache.Keys() {
+ out = append(out, v.(K))
+ }
+ return out
+}
diff --git a/internal/utils/lrucache/lrucache_test.go b/internal/utils/lrucache/lrucache_test.go
new file mode 100644
index 0000000000..005603b9db
--- /dev/null
+++ b/internal/utils/lrucache/lrucache_test.go
@@ -0,0 +1,28 @@
+package lrucache
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestKeys(t *testing.T) {
+ c := NewCache[int, int](10)
+
+ for i := 0; i < 3; i++ {
+ c.Set(i, i)
+ }
+ m := map[int]int{
+ 0: 0,
+ 1: 1,
+ 2: 2,
+ }
+ keys := c.Keys()
+
+ m2 := map[int]int{}
+ for _, k := range keys {
+ m2[k] = k
+ }
+
+ require.Equal(t, m, m2)
+}
diff --git a/internal/utils/math.go b/internal/utils/math.go
new file mode 100644
index 0000000000..6dceec5eb2
--- /dev/null
+++ b/internal/utils/math.go
@@ -0,0 +1,17 @@
+package utils
+
+import "golang.org/x/exp/constraints"
+
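+// Min returns the smaller of two ordered values.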
+func Min[T constraints.Ordered](a, b T) T {
+ if a < b {
+ return a
+ }
+ return b
+}
+
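+// Max returns the larger of two ordered values.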
+func Max[T constraints.Ordered](a, b T) T {
+ if a > b {
+ return a
+ }
+ return b
+}
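Since these helpers accept any `constraints.Ordered` type, a small usage sketch (illustrative only):

```go
package main

import (
	"fmt"

	"github.com/harmony-one/harmony/internal/utils"
)

func main() {
	fmt.Println(utils.Min(3, 7))            // 3
	fmt.Println(utils.Max(2.5, 1.0))        // 2.5
	fmt.Println(utils.Min("apple", "pear")) // "apple" (lexicographic order)
}
```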
diff --git a/node/api.go b/node/api.go
index ceda968084..e3862f510c 100644
--- a/node/api.go
+++ b/node/api.go
@@ -2,7 +2,9 @@ package node
import (
"github.com/harmony-one/harmony/consensus/quorum"
+ "github.com/harmony-one/harmony/consensus/votepower"
"github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/crypto/bls"
"github.com/harmony-one/harmony/eth/rpc"
"github.com/harmony-one/harmony/hmy"
"github.com/harmony-one/harmony/internal/tikv"
@@ -175,7 +177,7 @@ func (node *Node) GetConfig() rpc_common.Config {
// GetLastSigningPower get last signed power
func (node *Node) GetLastSigningPower() (float64, error) {
- power, err := node.Consensus.Decider.CurrentTotalPower(quorum.Commit)
+ power, err := node.Consensus.Decider().CurrentTotalPower(quorum.Commit)
if err != nil {
return 0, err
}
@@ -183,3 +185,29 @@ func (node *Node) GetLastSigningPower() (float64, error) {
round := float64(power.MulInt64(10000).RoundInt64()) / 10000
return round, nil
}
+
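+// GetLastSigningPower2 recomputes the signing power behind the latest block from
+// chain state: it rebuilds the shard's vote-power roster for the current epoch,
+// masks it with the block's last-commit bitmap, and rounds to four decimal places.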
+func (node *Node) GetLastSigningPower2() (float64, error) {
+ bc := node.Consensus.Blockchain()
+ cur := bc.CurrentBlock()
+ ss, err := bc.ReadShardState(cur.Epoch())
+ if err != nil {
+ return 0, err
+ }
+ roster, err := votepower.Compute(&ss.Shards[bc.ShardID()], cur.Epoch())
+ if err != nil {
+ return 0, err
+ }
+ blsPubKeys, err := ss.Shards[bc.ShardID()].BLSPublicKeys()
+ if err != nil {
+ return 0, err
+ }
+
+ mask := bls.NewMask(blsPubKeys)
+ err = mask.SetMask(cur.Header().LastCommitBitmap())
+ if err != nil {
+ return 0, err
+ }
+ power := roster.VotePowerByMask(mask)
+ round := float64(power.MulInt64(10000).RoundInt64()) / 10000
+ return round, nil
+}
diff --git a/node/node.go b/node/node.go
index c34b018d6f..f80c502a0a 100644
--- a/node/node.go
+++ b/node/node.go
@@ -52,7 +52,6 @@ import (
"github.com/harmony-one/harmony/node/worker"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/shard"
- "github.com/harmony-one/harmony/shard/committee"
"github.com/harmony-one/harmony/staking/reward"
"github.com/harmony-one/harmony/staking/slash"
staking "github.com/harmony-one/harmony/staking/types"
@@ -82,7 +81,7 @@ type syncConfig struct {
}
type ISync interface {
- UpdateBlockAndStatus(block *types.Block, bc core.BlockChain, verifyAllSig bool) error
+ UpdateBlockAndStatus(block *types.Block, bc core.BlockChain) error
AddLastMileBlock(block *types.Block)
GetActivePeerNumber() int
CreateSyncConfig(peers []p2p.Peer, shardID uint32, selfPeerID libp2p_peer.ID, waitForEachPeerToConnect bool) error
@@ -158,10 +157,6 @@ func (node *Node) SyncInstance() ISync {
return node.GetOrCreateSyncInstance(true)
}
-func (node *Node) CurrentSyncInstance() bool {
- return node.GetOrCreateSyncInstance(false) != nil
-}
-
// GetOrCreateSyncInstance returns an instance of state sync, either legacy or staged
// if initiate sets to true, it generates a new instance
func (node *Node) GetOrCreateSyncInstance(initiate bool) ISync {
@@ -504,10 +499,6 @@ func (node *Node) validateNodeMessage(ctx context.Context, payload []byte) (
utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()).
Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message rejected")
return nil, 0, errors.New("beacon block height smaller than current height beyond tolerance")
- } else if block.NumberU64()-beaconBlockHeightTolerance > curBeaconHeight {
- utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()).
- Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message rejected")
- return nil, 0, errors.Errorf("beacon block height too much higher than current height beyond tolerance, block %d, current %d, epoch %d , current %d", block.NumberU64(), curBeaconHeight, block.Epoch().Uint64(), curBeaconBlock.Epoch().Uint64())
} else if block.NumberU64() <= curBeaconHeight {
utils.Logger().Debug().Uint64("receivedNum", block.NumberU64()).
Uint64("currentNum", curBeaconHeight).Msg("beacon block sync message ignored")
@@ -555,7 +546,7 @@ func (node *Node) validateNodeMessage(ctx context.Context, payload []byte) (
// validate shardID
// validate public key size
// verify message signature
-func validateShardBoundMessage(consensus *consensus.Consensus, nodeConfig *nodeconfig.ConfigType, payload []byte,
+func validateShardBoundMessage(consensus *consensus.Consensus, peer libp2p_peer.ID, nodeConfig *nodeconfig.ConfigType, payload []byte,
) (*msg_pb.Message, *bls.SerializedPublicKey, bool, error) {
var (
m msg_pb.Message
@@ -664,7 +655,7 @@ func validateShardBoundMessage(consensus *consensus.Consensus, nodeConfig *nodec
return nil, nil, true, errors.WithStack(shard.ErrValidNotInCommittee)
}
} else {
- count := consensus.Decider.ParticipantsCount()
+ count := consensus.Decider().ParticipantsCount()
if (count+7)>>3 != int64(len(senderBitmap)) {
nodeConsensusMessageCounterVec.With(prometheus.Labels{"type": "invalid_participant_count"}).Inc()
return nil, nil, true, errors.WithStack(errWrongSizeOfBitmap)
@@ -736,6 +727,7 @@ func (node *Node) StartPubSub() error {
// p2p consensus message handler function
type p2pHandlerConsensus func(
ctx context.Context,
+ peer libp2p_peer.ID,
msg *msg_pb.Message,
key *bls.SerializedPublicKey,
) error
@@ -749,6 +741,7 @@ func (node *Node) StartPubSub() error {
// interface pass to p2p message validator
type validated struct {
+ peerID libp2p_peer.ID
consensusBound bool
handleC p2pHandlerConsensus
handleCArg *msg_pb.Message
@@ -806,7 +799,7 @@ func (node *Node) StartPubSub() error {
// validate consensus message
validMsg, senderPubKey, ignore, err := validateShardBoundMessage(
- node.Consensus, node.NodeConfig, openBox[proto.MessageCategoryBytes:],
+ node.Consensus, peer, node.NodeConfig, openBox[proto.MessageCategoryBytes:],
)
if err != nil {
@@ -820,6 +813,7 @@ func (node *Node) StartPubSub() error {
}
msg.ValidatorData = validated{
+ peerID: peer,
consensusBound: true,
handleC: node.Consensus.HandleMessageUpdate,
handleCArg: validMsg,
@@ -850,6 +844,7 @@ func (node *Node) StartPubSub() error {
}
}
msg.ValidatorData = validated{
+ peerID: peer,
consensusBound: false,
handleE: node.HandleNodeMessage,
handleEArg: validMsg,
@@ -901,7 +896,7 @@ func (node *Node) StartPubSub() error {
errChan <- withError{err, nil}
}
} else {
- if err := msg.handleC(ctx, msg.handleCArg, msg.senderPubKey); err != nil {
+ if err := msg.handleC(ctx, msg.peerID, msg.handleCArg, msg.senderPubKey); err != nil {
errChan <- withError{err, msg.senderPubKey}
}
}
@@ -1235,72 +1230,6 @@ func (node *Node) updateInitialRewardValues() {
reward.SetTotalInitialTokens(initTotal)
}
-// InitConsensusWithValidators initialize shard state
-// from latest epoch and update committee pub
-// keys for consensus
-func (node *Node) InitConsensusWithValidators() (err error) {
- if node.Consensus == nil {
- utils.Logger().Error().
- Msg("[InitConsensusWithValidators] consenus is nil; Cannot figure out shardID")
- return errors.New(
- "[InitConsensusWithValidators] consenus is nil; Cannot figure out shardID",
- )
- }
- shardID := node.Consensus.ShardID
- currentBlock := node.Blockchain().CurrentBlock()
- blockNum := currentBlock.NumberU64()
- node.Consensus.SetMode(consensus.Listening)
- epoch := currentBlock.Epoch()
- utils.Logger().Info().
- Uint64("blockNum", blockNum).
- Uint32("shardID", shardID).
- Uint64("epoch", epoch.Uint64()).
- Msg("[InitConsensusWithValidators] Try To Get PublicKeys")
- shardState, err := committee.WithStakingEnabled.Compute(
- epoch, node.Consensus.Blockchain(),
- )
- if err != nil {
- utils.Logger().Err(err).
- Uint64("blockNum", blockNum).
- Uint32("shardID", shardID).
- Uint64("epoch", epoch.Uint64()).
- Msg("[InitConsensusWithValidators] Failed getting shard state")
- return err
- }
- subComm, err := shardState.FindCommitteeByID(shardID)
- if err != nil {
- utils.Logger().Err(err).
- Interface("shardState", shardState).
- Msg("[InitConsensusWithValidators] Find CommitteeByID")
- return err
- }
- pubKeys, err := subComm.BLSPublicKeys()
- if err != nil {
- utils.Logger().Error().
- Uint32("shardID", shardID).
- Uint64("blockNum", blockNum).
- Msg("[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys")
- return errors.Wrapf(
- err,
- "[InitConsensusWithValidators] PublicKeys is Empty, Cannot update public keys",
- )
- }
-
- for _, key := range pubKeys {
- if node.Consensus.GetPublicKeys().Contains(key.Object) {
- utils.Logger().Info().
- Uint64("blockNum", blockNum).
- Int("numPubKeys", len(pubKeys)).
- Str("mode", node.Consensus.Mode().String()).
- Msg("[InitConsensusWithValidators] Successfully updated public keys")
- node.Consensus.UpdatePublicKeys(pubKeys, shard.Schedule.InstanceForEpoch(epoch).ExternalAllowlist())
- node.Consensus.SetMode(consensus.Normal)
- return nil
- }
- }
- return nil
-}
-
func (node *Node) initNodeConfiguration() (service.NodeConfig, chan p2p.Peer, error) {
chanPeer := make(chan p2p.Peer)
nodeConfig := service.NodeConfig{
diff --git a/node/node.md b/node/node.md
index a783df5ead..168c7edeb5 100644
--- a/node/node.md
+++ b/node/node.md
@@ -3,7 +3,7 @@
### Services
In Harmony network, a node can be treated as one of the roles: validator, leader, beacon validator,
-beacon leader depending on its context. With each role, a node can run a certian set of services.
+beacon leader depending on its context. With each role, a node can run a certain set of services.
For example, a leader needs to run explorer support service, syncing support
service etc.. while a normal validator does not run such many.
@@ -13,8 +13,8 @@ service etc.. while a normal validator does not run such many.
To support such behavior, we architecture Node logic with service manager which can wait for actions
which each triggers its management operation such as starting some service, stopping some service.
-Each service needs to implement minimal interace behavior like Start, Stop so that the service
-manager can handle those operation.
+Each service needs to implement minimal interface behavior like Start, Stop so that the service
+manager can handle those operations.
```go
// ServiceInterface is the collection of functions any service needs to implement.
@@ -26,7 +26,7 @@ type ServiceInterface interface {
### Creating a service.
-To create a service, you need to have an struct which implements above interface function
+To create a service, you need to have a struct which implements the above interface functions
`StartService`, `StopService`.
Since different services may have different ways to be created you may need to have a method
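As a companion to the interface described in this documentation hunk, here is a minimal sketch of a conforming service; the method names follow the Start/Stop behavior the text mentions, and the exact signatures in `api/service` may differ:

```go
// Hypothetical minimal service for illustration; not taken from the repository.
type heartbeatService struct {
	quit chan struct{}
}

// Start begins the service's background work.
func (s *heartbeatService) Start() {
	s.quit = make(chan struct{})
	go func() {
		// periodic work would go here until quit is closed
		<-s.quit
	}()
}

// Stop signals the background work to end.
func (s *heartbeatService) Stop() {
	close(s.quit)
}
```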
diff --git a/node/node_explorer.go b/node/node_explorer.go
index fbb5b88985..1e4a4010a0 100644
--- a/node/node_explorer.go
+++ b/node/node_explorer.go
@@ -53,7 +53,7 @@ func (node *Node) explorerMessageHandler(ctx context.Context, msg *msg_pb.Messag
return err
}
- if !node.Consensus.Decider.IsQuorumAchievedByMask(mask) {
+ if !node.Consensus.Decider().IsQuorumAchievedByMask(mask) {
utils.Logger().Error().Msg("[Explorer] not have enough signature power")
return nil
}
@@ -154,7 +154,7 @@ func (node *Node) AddNewBlockForExplorer(block *types.Block) {
utils.Logger().Info().Uint64("blockHeight", block.NumberU64()).Msg("[Explorer] Adding new block for explorer node")
- if _, err := node.Blockchain().InsertChain([]*types.Block{block}, false); err == nil {
+ if _, err := node.Blockchain().InsertChain([]*types.Block{block}, false); err == nil || errors.Is(err, core.ErrKnownBlock) {
if block.IsLastBlockInEpoch() {
node.Consensus.UpdateConsensusInformation()
}
diff --git a/node/node_handler.go b/node/node_handler.go
index 92c3396d4b..b745ca7136 100644
--- a/node/node_handler.go
+++ b/node/node_handler.go
@@ -76,8 +76,6 @@ func (node *Node) HandleNodeMessage(
if node.Blockchain().ShardID() != shard.BeaconChainShardID {
for _, block := range blocks {
if block.ShardID() == 0 {
- utils.Logger().Info().
- Msgf("Beacon block being handled by block channel: %d", block.NumberU64())
if block.IsLastBlockInEpoch() {
go func(blk *types.Block) {
node.BeaconBlockChannel <- blk
@@ -339,7 +337,7 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error {
}
BroadcastCXReceipts(newBlock, node.Consensus)
} else {
- if node.Consensus.Mode() != consensus.Listening {
+ if mode := node.Consensus.Mode(); mode != consensus.Listening {
numSignatures := node.Consensus.NumSignaturesIncludedInBlock(newBlock)
utils.Logger().Info().
Uint64("blockNum", newBlock.NumberU64()).
@@ -349,7 +347,13 @@ func (node *Node) PostConsensusProcessing(newBlock *types.Block) error {
Int("numTxns", len(newBlock.Transactions())).
Int("numStakingTxns", len(newBlock.StakingTransactions())).
Uint32("numSignatures", numSignatures).
+ Str("mode", mode.String()).
Msg("BINGO !!! Reached Consensus")
+ if node.Consensus.Mode() == consensus.Syncing {
+ mode = node.Consensus.UpdateConsensusInformation()
+ utils.Logger().Info().Msgf("Switching to mode %s", mode)
+ node.Consensus.SetMode(mode)
+ }
node.Consensus.UpdateValidatorMetrics(float64(numSignatures), float64(newBlock.NumberU64()))
diff --git a/node/node_handler_test.go b/node/node_handler_test.go
index a5085652b0..867a9616dc 100644
--- a/node/node_handler_test.go
+++ b/node/node_handler_test.go
@@ -134,7 +134,7 @@ func TestVerifyNewBlock(t *testing.T) {
// work around vrf verification as it's tested in another test.
node.Blockchain().Config().VRFEpoch = big.NewInt(2)
- if err := consensus.VerifyNewBlock(nil, node.Blockchain(), node.Beaconchain())(block); err != nil {
+ if err := node.Blockchain().ValidateNewBlock(block, node.Beaconchain()); err != nil {
t.Error("New block is not verified successfully:", err)
}
}
diff --git a/node/node_newblock_test.go b/node/node_newblock_test.go
index 86dd1e6c7e..5780b7cda0 100644
--- a/node/node_newblock_test.go
+++ b/node/node_newblock_test.go
@@ -74,7 +74,7 @@ func TestFinalizeNewBlockAsync(t *testing.T) {
commitSigs, func() uint64 { return 0 }, common.Address{}, nil, nil,
)
- if err := consensus.VerifyNewBlock(nil, blockchain, nil)(block); err != nil {
+ if err := blockchain.ValidateNewBlock(block, blockchain); err != nil {
t.Error("New block is not verified successfully:", err)
}
diff --git a/node/node_syncing.go b/node/node_syncing.go
index fa90ec5c78..b1ee21ea7d 100644
--- a/node/node_syncing.go
+++ b/node/node_syncing.go
@@ -7,30 +7,28 @@ import (
"strconv"
"time"
- "github.com/harmony-one/harmony/internal/tikv"
- "github.com/multiformats/go-multiaddr"
-
- prom "github.com/harmony-one/harmony/api/service/prometheus"
- "github.com/prometheus/client_golang/prometheus"
-
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
- lru "github.com/hashicorp/golang-lru"
- "github.com/pkg/errors"
-
"github.com/harmony-one/harmony/api/service"
"github.com/harmony-one/harmony/api/service/legacysync"
legdownloader "github.com/harmony-one/harmony/api/service/legacysync/downloader"
downloader_pb "github.com/harmony-one/harmony/api/service/legacysync/downloader/proto"
+ prom "github.com/harmony-one/harmony/api/service/prometheus"
"github.com/harmony-one/harmony/api/service/stagedstreamsync"
"github.com/harmony-one/harmony/api/service/stagedsync"
"github.com/harmony-one/harmony/api/service/synchronize"
+ "github.com/harmony-one/harmony/consensus"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/core/types"
nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+ "github.com/harmony-one/harmony/internal/tikv"
"github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/p2p"
"github.com/harmony-one/harmony/shard"
+ lru "github.com/hashicorp/golang-lru"
+ "github.com/multiformats/go-multiaddr"
+ "github.com/pkg/errors"
+ "github.com/prometheus/client_golang/prometheus"
)
// Constants related to doing syncing.
@@ -233,7 +231,16 @@ func (node *Node) doBeaconSyncing() {
// If Downloader is not working, we need also deal with blocks from beaconBlockChannel
go func(node *Node) {
// TODO ek – infinite loop; add shutdown/cleanup logic
- for _ = range node.BeaconBlockChannel {
+ for b := range node.BeaconBlockChannel {
+ if b != nil && b.IsLastBlockInEpoch() {
+ _, err := node.EpochChain().InsertChain(types.Blocks{b}, true)
+ if err != nil {
+ utils.Logger().Error().Err(err).Msgf("[SYNC] InsertChain failed shard: %d epoch:%d number:%d", b.Header().ShardID(), b.Epoch().Uint64(), b.NumberU64())
+ } else {
+ utils.Logger().Info().
+ Msgf("Beacon block being handled by block channel: epoch: %d, number: %d", b.Epoch().Uint64(), b.NumberU64())
+ }
+ }
}
}(node)
}
@@ -279,20 +286,16 @@ func (node *Node) DoSyncing(bc core.BlockChain, willJoinConsensus bool) {
for {
select {
case <-ticker.C:
- node.doSync(bc, willJoinConsensus)
- case <-node.Consensus.BlockNumLowChan:
- node.doSync(bc, willJoinConsensus)
+ node.doSync(node.SyncInstance(), node.SyncingPeerProvider, bc, node.Consensus, willJoinConsensus)
}
}
}
// doSync keep the node in sync with other peers, willJoinConsensus means the node will try to join consensus after catch up
-func (node *Node) doSync(bc core.BlockChain, willJoinConsensus bool) {
-
- syncInstance := node.SyncInstance()
+func (node *Node) doSync(syncInstance ISync, syncingPeerProvider SyncingPeerProvider, bc core.BlockChain, consensus *consensus.Consensus, willJoinConsensus bool) {
if syncInstance.GetActivePeerNumber() < legacysync.NumPeersLowBound {
shardID := bc.ShardID()
- peers, err := node.SyncingPeerProvider.SyncingPeers(shardID)
+ peers, err := syncingPeerProvider.SyncingPeers(shardID)
if err != nil {
utils.Logger().Warn().
Err(err).
@@ -313,13 +316,13 @@ func (node *Node) doSync(bc core.BlockChain, willJoinConsensus bool) {
if isSynchronized, _, _ := syncInstance.GetParsedSyncStatusDoubleChecked(); !isSynchronized {
node.IsSynchronized.UnSet()
if willJoinConsensus {
- node.Consensus.BlocksNotSynchronized()
+ consensus.BlocksNotSynchronized("node.doSync")
}
isBeacon := bc.ShardID() == shard.BeaconChainShardID
- syncInstance.SyncLoop(bc, isBeacon, node.Consensus, legacysync.LoopMinTime)
+ syncInstance.SyncLoop(bc, isBeacon, consensus, legacysync.LoopMinTime)
if willJoinConsensus {
node.IsSynchronized.Set()
- node.Consensus.BlocksSynchronized()
+ consensus.BlocksSynchronized()
}
}
node.IsSynchronized.Set()
@@ -415,7 +418,7 @@ func (node *Node) SendNewBlockToUnsync() {
utils.Logger().Warn().Msg("[SYNC] unable to encode block to hashes")
continue
}
- blockWithSigBytes, err := node.getEncodedBlockWithSigFromBlock(block)
+ blockWithSigBytes, err := getEncodedBlockWithSigFromBlock(block)
if err != nil {
utils.Logger().Warn().Err(err).Msg("[SYNC] rlp encode BlockWithSig")
continue
@@ -747,7 +750,7 @@ func (node *Node) getEncodedBlockWithSigByHeight(height uint64) ([]byte, error)
return b, nil
}
-func (node *Node) getEncodedBlockWithSigFromBlock(block *types.Block) ([]byte, error) {
+func getEncodedBlockWithSigFromBlock(block *types.Block) ([]byte, error) {
bwh := legacysync.BlockWithSig{
Block: block,
CommitSigAndBitmap: block.GetCurrentCommitSig(),
diff --git a/p2p/host.go b/p2p/host.go
index 31c9c2c44a..4a4240c565 100644
--- a/p2p/host.go
+++ b/p2p/host.go
@@ -11,6 +11,13 @@ import (
"sync"
"time"
+ "github.com/harmony-one/bls/ffi/go/bls"
+ nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
+ "github.com/harmony-one/harmony/internal/utils"
+ "github.com/harmony-one/harmony/internal/utils/blockedpeers"
+ "github.com/harmony-one/harmony/p2p/discovery"
+ "github.com/harmony-one/harmony/p2p/security"
+ sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/libp2p/go-libp2p"
dht "github.com/libp2p/go-libp2p-kad-dht"
libp2p_pubsub "github.com/libp2p/go-libp2p-pubsub"
@@ -24,19 +31,11 @@ import (
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/core/routing"
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
-
"github.com/libp2p/go-libp2p/p2p/security/noise"
libp2ptls "github.com/libp2p/go-libp2p/p2p/security/tls"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/rs/zerolog"
-
- "github.com/harmony-one/bls/ffi/go/bls"
- nodeconfig "github.com/harmony-one/harmony/internal/configs/node"
- "github.com/harmony-one/harmony/internal/utils"
- "github.com/harmony-one/harmony/p2p/discovery"
- "github.com/harmony-one/harmony/p2p/security"
- sttypes "github.com/harmony-one/harmony/p2p/stream/types"
)
type ConnectCallback func(net libp2p_network.Network, conn libp2p_network.Conn) error
@@ -254,7 +253,8 @@ func NewHost(cfg HostConfig) (Host, error) {
self.PeerID = p2pHost.ID()
subLogger := utils.Logger().With().Str("hostID", p2pHost.ID().Pretty()).Logger()
- security := security.NewManager(cfg.MaxConnPerIP, cfg.MaxPeers)
+ banned := blockedpeers.NewManager(1024)
+ security := security.NewManager(cfg.MaxConnPerIP, int(cfg.MaxPeers), banned)
// has to save the private key for host
h := &HostV2{
h: p2pHost,
@@ -269,6 +269,7 @@ func NewHost(cfg HostConfig) (Host, error) {
logger: &subLogger,
ctx: ctx,
cancel: cancel,
+ banned: banned,
}
utils.Logger().Info().
@@ -323,6 +324,7 @@ type HostV2 struct {
onDisconnects DisconnectCallbacks
ctx context.Context
cancel func()
+ banned *blockedpeers.Manager
}
// PubSub ..
@@ -492,9 +494,7 @@ func (host *HostV2) ListPeer(topic string) []libp2p_peer.ID {
// ListBlockedPeer returns list of blocked peer
func (host *HostV2) ListBlockedPeer() []libp2p_peer.ID {
- // TODO: this is a place holder for now
- peers := make([]libp2p_peer.ID, 0)
- return peers
+ return host.banned.Keys()
}
// GetPeerCount ...
diff --git a/p2p/security/security.go b/p2p/security/security.go
index 932f8b6e9a..d363a96470 100644
--- a/p2p/security/security.go
+++ b/p2p/security/security.go
@@ -3,9 +3,10 @@ package security
import (
"fmt"
"sync"
- "sync/atomic"
+ "time"
"github.com/harmony-one/harmony/internal/utils"
+ "github.com/harmony-one/harmony/internal/utils/blockedpeers"
libp2p_network "github.com/libp2p/go-libp2p/core/network"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
@@ -16,58 +17,56 @@ type Security interface {
OnDisconnectCheck(conn libp2p_network.Conn) error
}
-type Manager struct {
- maxConnPerIP int
- maxPeers int64
+type peerMap struct {
+ peers map[string][]string
+}
- mutex sync.Mutex
- peers peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array
+func newPeersMap() *peerMap {
+ return &peerMap{
+ peers: make(map[string][]string),
+ }
}
-type peerMap struct {
- count int64
- peers sync.Map
+func (peerMap *peerMap) Len() int {
+ return len(peerMap.peers)
}
-func (peerMap *peerMap) Len() int64 {
- return atomic.LoadInt64(&peerMap.count)
+func (peerMap *peerMap) Store(key string, value []string) {
+ peerMap.peers[key] = value
}
-func (peerMap *peerMap) Store(key, value interface{}) {
- // only increment if you didn't have this key
- hasKey := peerMap.HasKey(key)
- peerMap.peers.Store(key, value)
- if !hasKey {
- atomic.AddInt64(&peerMap.count, 1)
- }
+func (peerMap *peerMap) HasKey(key string) bool {
+ _, ok := peerMap.peers[key]
+ return ok
}
-func (peerMap *peerMap) HasKey(key interface{}) bool {
- hasKey := false
- peerMap.peers.Range(func(k, v interface{}) bool {
- if k == key {
- hasKey = true
- return false
- }
- return true
- })
- return hasKey
+func (peerMap *peerMap) Delete(key string) {
+ delete(peerMap.peers, key)
}
-func (peerMap *peerMap) Delete(key interface{}) {
- peerMap.peers.Delete(key)
- atomic.AddInt64(&peerMap.count, -1)
+func (peerMap *peerMap) Load(key string) (value []string, ok bool) {
+ value, ok = peerMap.peers[key]
+ return value, ok
}
-func (peerMap *peerMap) Load(key interface{}) (value interface{}, ok bool) {
- return peerMap.peers.Load(key)
+func (peerMap *peerMap) Range(f func(key string, value []string) bool) {
+ for key, value := range peerMap.peers {
+ if !f(key, value) {
+ break
+ }
+ }
}
-func (peerMap *peerMap) Range(f func(key, value any) bool) {
- peerMap.peers.Range(f)
+type Manager struct {
+ maxConnPerIP int
+ maxPeers int
+
+ mutex sync.Mutex
+ peers *peerMap // All the connected nodes, key is the Peer's IP, value is the peer's ID array
+ banned *blockedpeers.Manager
}
-func NewManager(maxConnPerIP int, maxPeers int64) *Manager {
+func NewManager(maxConnPerIP int, maxPeers int, banned *blockedpeers.Manager) *Manager {
if maxConnPerIP < 0 {
panic("maximum connections per IP must not be negative")
}
@@ -77,9 +76,17 @@ func NewManager(maxConnPerIP int, maxPeers int64) *Manager {
return &Manager{
maxConnPerIP: maxConnPerIP,
maxPeers: maxPeers,
+ peers: newPeersMap(),
+ banned: banned,
}
}
+func (m *Manager) RangePeers(f func(key string, value []string) bool) {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+ m.peers.Range(f)
+}
+
func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network.Conn) error {
m.mutex.Lock()
defer m.mutex.Unlock()
@@ -89,19 +96,11 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network
return errors.Wrap(err, "failed on get remote ip")
}
- value, ok := m.peers.Load(remoteIp)
- if !ok {
- value = []string{}
- }
-
- peers, ok := value.([]string)
- if !ok {
- return errors.New("peers info type err")
- }
+ peers, _ := m.peers.Load(remoteIp)
// avoid add repeatedly
peerID := conn.RemotePeer().String()
- _, ok = find(peers, peerID)
+ _, ok := find(peers, peerID)
if !ok {
peers = append(peers, peerID)
}
@@ -118,11 +117,18 @@ func (m *Manager) OnConnectCheck(net libp2p_network.Network, conn libp2p_network
// only limit addition if it's a new peer and not an existing peer with new connection
if m.maxPeers > 0 && currentPeerCount >= m.maxPeers && !m.peers.HasKey(remoteIp) {
utils.Logger().Warn().
- Int64("connected peers", currentPeerCount).
+ Int("connected peers", currentPeerCount).
Str("new peer", remoteIp).
Msg("too many peers, closing")
return net.ClosePeer(conn.RemotePeer())
}
+ if m.banned.IsBanned(conn.RemotePeer(), time.Now()) {
+ utils.Logger().Warn().
+ Str("new peer", remoteIp).
+ Msg("peer is banned, closing")
+ return net.ClosePeer(conn.RemotePeer())
+ }
+
m.peers.Store(remoteIp, peers)
return nil
}
@@ -136,16 +142,11 @@ func (m *Manager) OnDisconnectCheck(conn libp2p_network.Conn) error {
return errors.Wrap(err, "failed on get ip")
}
- value, ok := m.peers.Load(ip)
+ peers, ok := m.peers.Load(ip)
if !ok {
return nil
}
- peers, ok := value.([]string)
- if !ok {
- return errors.New("peers info type err")
- }
-
peerID := conn.RemotePeer().String()
index, ok := find(peers, peerID)
if ok {
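The security.Manager refactor above replaces the lock-free sync.Map with a plain map guarded by the manager's mutex and adds a shared banned-peer list. A minimal wiring sketch, using only the constructors and methods visible in this diff (the meaning of the blockedpeers capacity argument is an assumption; everything else is taken from host.go and the tests below):

```go
package main

import (
	"fmt"

	"github.com/harmony-one/harmony/internal/utils/blockedpeers"
	"github.com/harmony-one/harmony/p2p/security"
)

func main() {
	// One banned-peer list shared between the libp2p host and the security
	// manager, mirroring p2p/host.go above (1024 is the value host.go uses).
	banned := blockedpeers.NewManager(1024)

	// maxConnPerIP=2, maxPeers=0; per OnConnectCheck, a zero maxPeers disables
	// the global peer cap (the same arguments the tests in this PR use).
	m := security.NewManager(2, 0, banned)

	// RangePeers is the exported, mutex-protected way to walk the
	// connected-peer map from outside the package.
	m.RangePeers(func(ip string, ids []string) bool {
		fmt.Printf("%s -> %d connection(s)\n", ip, len(ids))
		return true // keep iterating
	})
}
```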
diff --git a/p2p/security/security_test.go b/p2p/security/security_test.go
index 73ce4741e7..3ebe589461 100644
--- a/p2p/security/security_test.go
+++ b/p2p/security/security_test.go
@@ -3,9 +3,11 @@ package security
import (
"context"
"fmt"
+ "net"
+ "sync"
"testing"
- "time"
+ "github.com/harmony-one/harmony/internal/utils/blockedpeers"
"github.com/libp2p/go-libp2p"
ic "github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/host"
@@ -13,6 +15,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
type ConnectCallback func(net libp2p_network.Network, conn libp2p_network.Conn) error
@@ -51,78 +54,107 @@ func (mh *fakeHost) SetDisconnectCallback(callback DisconnectCallback) {
mh.onDisconnects = append(mh.onDisconnects, callback)
}
+func GetFreePort(t *testing.T) int {
+ t.Helper()
+ a, err := net.ResolveTCPAddr("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ return 0
+ } else {
+ l, err := net.ListenTCP("tcp", a)
+ if err != nil {
+ t.Fatal(err)
+ return 0
+ } else {
+ defer l.Close()
+ return l.Addr().(*net.TCPAddr).Port
+ }
+ }
+}
+
func TestManager_OnConnectCheck(t *testing.T) {
- h1, err := newPeer(50550)
- assert.Nil(t, err)
+ h1, err := newPeer(GetFreePort(t))
+ require.NoError(t, err)
defer h1.Close()
fakeHost := &fakeHost{}
- security := NewManager(2, 1)
+ security := NewManager(2, 1, blockedpeers.NewManager(4))
h1.Network().Notify(fakeHost)
fakeHost.SetConnectCallback(security.OnConnectCheck)
fakeHost.SetDisconnectCallback(security.OnDisconnectCheck)
- h2, err := newPeer(50551)
+ h2, err := newPeer(GetFreePort(t))
assert.Nil(t, err)
defer h2.Close()
err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()})
- assert.Nil(t, err)
+ require.NoError(t, err)
- security.peers.Range(func(k, v interface{}) bool {
- peers := v.([]string)
+ security.RangePeers(func(k string, peers []string) bool {
assert.Equal(t, 1, len(peers))
return true
})
- h3, err := newPeer(50552)
+ h3, err := newPeer(GetFreePort(t))
assert.Nil(t, err)
defer h3.Close()
err = h3.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()})
assert.Nil(t, err)
- security.peers.Range(func(k, v interface{}) bool {
- peers := v.([]string)
- assert.Equal(t, 2, len(peers))
+ security.RangePeers(func(k string, peers []string) bool {
+ require.Equal(t, 2, len(peers))
return true
})
- h4, err := newPeer(50553)
+ h4, err := newPeer(GetFreePort(t))
assert.Nil(t, err)
defer h4.Close()
err = h4.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()})
assert.Nil(t, err)
- security.peers.Range(func(k, v interface{}) bool {
- peers := v.([]string)
- assert.Equal(t, 2, len(peers))
+ security.RangePeers(func(k string, peers []string) bool {
+ require.Equal(t, 2, len(peers))
return true
})
}
func TestManager_OnDisconnectCheck(t *testing.T) {
- h1, err := newPeer(50550)
+ h1, err := newPeer(GetFreePort(t))
assert.Nil(t, err)
defer h1.Close()
- fakeHost := &fakeHost{}
- security := NewManager(2, 0)
- h1.Network().Notify(fakeHost)
- fakeHost.SetConnectCallback(security.OnConnectCheck)
- fakeHost.SetDisconnectCallback(security.OnDisconnectCheck)
- h2, err := newPeer(50551)
+ h2, err := newPeer(GetFreePort(t))
assert.Nil(t, err)
defer h2.Close()
+
+ fakeHost := &fakeHost{}
+ security := NewManager(2, 0, blockedpeers.NewManager(4))
+ h1.Network().Notify(fakeHost)
+ var wrap = func() (
+ func(net libp2p_network.Network, conn libp2p_network.Conn) error,
+ func(conn libp2p_network.Conn) error,
+ *sync.WaitGroup) {
+ wg := &sync.WaitGroup{}
+ return func(net libp2p_network.Network, conn libp2p_network.Conn) error {
+ wg.Add(1)
+ return security.OnConnectCheck(net, conn)
+ }, func(conn libp2p_network.Conn) error {
+ defer wg.Done()
+ return security.OnDisconnectCheck(conn)
+ }, wg
+ }
+ OnConnectCheck, OnDisconnectCheck, wg := wrap()
+ fakeHost.SetConnectCallback(OnConnectCheck)
+ fakeHost.SetDisconnectCallback(OnDisconnectCheck)
+
err = h2.Connect(context.Background(), peer.AddrInfo{ID: h1.ID(), Addrs: h1.Network().ListenAddresses()})
assert.Nil(t, err)
- security.peers.Range(func(k, v interface{}) bool {
- peers := v.([]string)
+ security.RangePeers(func(k string, peers []string) bool {
assert.Equal(t, 1, len(peers))
return true
})
err = h2.Network().ClosePeer(h1.ID())
assert.Nil(t, err)
- time.Sleep(200 * time.Millisecond)
- security.peers.Range(func(k, v interface{}) bool {
- peers := v.([]string)
+ wg.Wait()
+ security.RangePeers(func(k string, peers []string) bool {
assert.Equal(t, 0, len(peers))
return true
})
diff --git a/p2p/stream/common/streammanager/streammanager.go b/p2p/stream/common/streammanager/streammanager.go
index 26025fb720..8273fea581 100644
--- a/p2p/stream/common/streammanager/streammanager.go
+++ b/p2p/stream/common/streammanager/streammanager.go
@@ -139,8 +139,8 @@ func (sm *streamManager) loop() {
discCancel() // cancel last discovery
}
discCtx, discCancel = context.WithCancel(sm.ctx)
- go func() {
- discovered, err := sm.discoverAndSetupStream(discCtx)
+ go func(ctx context.Context) {
+ discovered, err := sm.discoverAndSetupStream(ctx)
if err != nil {
sm.logger.Err(err)
}
@@ -152,7 +152,7 @@ func (sm *streamManager) loop() {
sm.coolDown.UnSet()
}()
}
- }()
+ }(discCtx)
case addStream := <-sm.addStreamCh:
err := sm.handleAddStream(addStream.st)
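The functional change in this hunk is that the discovery goroutine now receives the context as a parameter instead of closing over discCtx, which the loop reassigns on every tick. A standalone sketch of the underlying Go behaviour (plain stdlib, no Harmony APIs):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	current := "" // reassigned on every iteration, like discCtx in the loop above
	for _, name := range []string{"a", "b", "c"} {
		current = name
		wg.Add(1)
		// Passing the value as an argument pins it at spawn time; a closure
		// over `current` could instead observe a later assignment by the time
		// the goroutine is actually scheduled.
		go func(v string) {
			defer wg.Done()
			fmt.Println("spawned with", v)
		}(current)
	}
	wg.Wait()
}
```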
diff --git a/p2p/stream/protocols/sync/chain.go b/p2p/stream/protocols/sync/chain.go
index a095fffc1f..009c7b0afc 100644
--- a/p2p/stream/protocols/sync/chain.go
+++ b/p2p/stream/protocols/sync/chain.go
@@ -1,12 +1,21 @@
package sync
import (
+ Bytes "bytes"
+ "fmt"
+ "time"
+
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/light"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/consensus/engine"
+ "github.com/harmony-one/harmony/core/state"
"github.com/harmony-one/harmony/core/types"
shardingconfig "github.com/harmony-one/harmony/internal/configs/sharding"
+ "github.com/harmony-one/harmony/internal/utils"
"github.com/harmony-one/harmony/internal/utils/keylocker"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
"github.com/pkg/errors"
)
@@ -18,6 +27,10 @@ type chainHelper interface {
getBlocksByHashes(hs []common.Hash) ([]*types.Block, error)
getNodeData(hs []common.Hash) ([][]byte, error)
getReceipts(hs []common.Hash) ([]types.Receipts, error)
+ getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error)
+ getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error)
+ getByteCodes(hs []common.Hash, bytes uint64) ([][]byte, error)
+ getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error)
}
type chainHelperImpl struct {
@@ -158,7 +171,10 @@ func (ch *chainHelperImpl) getNodeData(hs []common.Hash) ([][]byte, error) {
entry, err = ch.chain.ValidatorCode(hash)
}
}
- if err == nil && len(entry) > 0 {
+ if err != nil {
+ return nil, err
+ }
+ if len(entry) > 0 {
nodes = append(nodes, entry)
bytes += len(entry)
}
@@ -182,3 +198,297 @@ func (ch *chainHelperImpl) getReceipts(hs []common.Hash) ([]types.Receipts, erro
}
return receipts, nil
}
+
+// getAccountRange serves an account-range request: accounts starting at origin under the given state root, capped by the limit hash and the byte budget, together with boundary Merkle proofs.
+func (ch *chainHelperImpl) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) {
+ if bytes > softResponseLimit {
+ bytes = softResponseLimit
+ }
+ // Retrieve the requested state and bail out if non existent
+ tr, err := trie.New(trie.StateTrieID(root), ch.chain.TrieDB())
+ if err != nil {
+ return nil, nil, err
+ }
+ snapshots := ch.chain.Snapshots()
+ if snapshots == nil {
+ return nil, nil, errors.Errorf("failed to retrieve snapshots")
+ }
+ it, err := snapshots.AccountIterator(root, origin)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Iterate over the requested range and pile accounts up
+ var (
+ accounts []*message.AccountData
+ size uint64
+ last common.Hash
+ )
+ for it.Next() {
+ hash, account := it.Hash(), common.CopyBytes(it.Account())
+
+ // Track the returned interval for the Merkle proofs
+ last = hash
+
+ // Assemble the reply item
+ size += uint64(common.HashLength + len(account))
+ accounts = append(accounts, &message.AccountData{
+ Hash: hash[:],
+ Body: account,
+ })
+ // If we've exceeded the request threshold, abort
+ if Bytes.Compare(hash[:], limit[:]) >= 0 {
+ break
+ }
+ if size > bytes {
+ break
+ }
+ }
+ it.Release()
+
+ // Generate the Merkle proofs for the first and last account
+ proof := light.NewNodeSet()
+ if err := tr.Prove(origin[:], 0, proof); err != nil {
+ utils.Logger().Warn().Err(err).Interface("origin", origin).Msg("Failed to prove account range")
+ return nil, nil, err
+ }
+ if last != (common.Hash{}) {
+ if err := tr.Prove(last[:], 0, proof); err != nil {
+ utils.Logger().Warn().Err(err).Interface("last", last).Msg("Failed to prove account range")
+ return nil, nil, err
+ }
+ }
+ var proofs [][]byte
+ for _, blob := range proof.NodeList() {
+ proofs = append(proofs, blob)
+ }
+ return accounts, proofs, nil
+}
+
+// getStorageRanges serves a storage-range request: storage slots of the given accounts under the state root, capped by the byte budget, together with boundary Merkle proofs when a range is cut short.
+func (ch *chainHelperImpl) getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error) {
+ if bytes > softResponseLimit {
+ bytes = softResponseLimit
+ }
+
+ // Calculate the hard limit at which to abort, even if mid storage trie
+ hardLimit := uint64(float64(bytes) * (1 + stateLookupSlack))
+
+ // Retrieve storage ranges until the packet limit is reached
+ var (
+ slots []*message.StoragesData
+ proofs [][]byte
+ size uint64
+ )
+ snapshots := ch.chain.Snapshots()
+ if snapshots == nil {
+ return nil, nil, errors.Errorf("failed to retrieve snapshots")
+ }
+ for _, account := range accounts {
+ // If we've exceeded the requested data limit, abort without opening
+ // a new storage range (that we'd need to prove due to exceeded size)
+ if size >= bytes {
+ break
+ }
+ // The first account might start from a different origin and end sooner
+ // origin==nil or limit ==nil
+ // Retrieve the requested state and bail out if non existent
+ it, err := snapshots.StorageIterator(root, account, origin)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Iterate over the requested range and pile slots up
+ var (
+ storage []*message.StorageData
+ last common.Hash
+ abort bool
+ )
+ for it.Next() {
+ if size >= hardLimit {
+ abort = true
+ break
+ }
+ hash, slot := it.Hash(), common.CopyBytes(it.Slot())
+
+ // Track the returned interval for the Merkle proofs
+ last = hash
+
+ // Assemble the reply item
+ size += uint64(common.HashLength + len(slot))
+ storage = append(storage, &message.StorageData{
+ Hash: hash[:],
+ Body: slot,
+ })
+ // If we've exceeded the request threshold, abort
+ if Bytes.Compare(hash[:], limit[:]) >= 0 {
+ break
+ }
+ }
+
+ if len(storage) > 0 {
+ storages := &message.StoragesData{
+ Data: storage,
+ }
+ slots = append(slots, storages)
+ }
+ it.Release()
+
+ // Generate the Merkle proofs for the first and last storage slot, but
+ // only if the response was capped. If the entire storage trie is included
+ // in the response, no need for any proofs.
+ if origin != (common.Hash{}) || (abort && len(storage) > 0) {
+ // Request started at a non-zero hash or was capped prematurely, add
+ // the endpoint Merkle proofs
+ accTrie, err := trie.NewStateTrie(trie.StateTrieID(root), ch.chain.TrieDB())
+ if err != nil {
+ return nil, nil, err
+ }
+ acc, err := accTrie.TryGetAccountByHash(account)
+ if err != nil || acc == nil {
+ return nil, nil, err
+ }
+ id := trie.StorageTrieID(root, account, acc.Root)
+ stTrie, err := trie.NewStateTrie(id, ch.chain.TrieDB())
+ if err != nil {
+ return nil, nil, err
+ }
+ proof := light.NewNodeSet()
+ if err := stTrie.Prove(origin[:], 0, proof); err != nil {
+ utils.Logger().Warn().Interface("origin", origin).Msg("Failed to prove storage range")
+ return nil, nil, err
+ }
+ if last != (common.Hash{}) {
+ if err := stTrie.Prove(last[:], 0, proof); err != nil {
+ utils.Logger().Warn().Interface("last", last).Msg("Failed to prove storage range")
+ return nil, nil, err
+ }
+ }
+ for _, blob := range proof.NodeList() {
+ proofs = append(proofs, blob)
+ }
+ // Proof terminates the reply as proofs are only added if a node
+ // refuses to serve more data (exception when a contract fetch is
+ // finishing, but that's that).
+ break
+ }
+ }
+ return slots, proofs, nil
+}
+
+// getByteCodes returns the contract bytecodes for the requested code hashes, capped by the byte budget.
+func (ch *chainHelperImpl) getByteCodes(hashes []common.Hash, bytes uint64) ([][]byte, error) {
+ if bytes > softResponseLimit {
+ bytes = softResponseLimit
+ }
+ if len(hashes) > maxCodeLookups {
+ hashes = hashes[:maxCodeLookups]
+ }
+ // Retrieve bytecodes until the packet size limit is reached
+ var (
+ codes [][]byte
+ totalBytes uint64
+ )
+ for _, hash := range hashes {
+ if hash == state.EmptyCodeHash {
+ // Peers should not request the empty code, but if they do, at
+ // least send them back a correct response without db lookups
+ codes = append(codes, []byte{})
+ } else if blob, err := ch.chain.ContractCode(hash); err == nil { // Double Check: ContractCodeWithPrefix
+ codes = append(codes, blob)
+ totalBytes += uint64(len(blob))
+ }
+ if totalBytes > bytes {
+ break
+ }
+ }
+ return codes, nil
+}
+
+// getTrieNodes returns the requested account and storage trie nodes under the state root, capped by the byte budget, lookup count, and elapsed time.
+func (ch *chainHelperImpl) getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error) {
+ if bytes > softResponseLimit {
+ bytes = softResponseLimit
+ }
+ // Make sure we have the state associated with the request
+ triedb := ch.chain.TrieDB()
+
+ accTrie, err := trie.NewStateTrie(trie.StateTrieID(root), triedb)
+ if err != nil {
+ // We don't have the requested state available, bail out
+ return nil, nil
+ }
+ // The 'snap' might be nil, in which case we cannot serve storage slots.
+ snapshots := ch.chain.Snapshots()
+ if snapshots == nil {
+ return nil, errors.Errorf("failed to retrieve snapshots")
+ }
+ snap := snapshots.Snapshot(root)
+ // Retrieve trie nodes until the packet size limit is reached
+ var (
+ nodes [][]byte
+ TotalBytes uint64
+ loads int // Trie hash expansions to count database reads
+ )
+ for _, p := range paths {
+ switch len(p.Pathset) {
+ case 0:
+ // Ensure we penalize invalid requests
+ return nil, fmt.Errorf("zero-item pathset requested")
+
+ case 1:
+ // If we're only retrieving an account trie node, fetch it directly
+ blob, resolved, err := accTrie.TryGetNode(p.Pathset[0])
+ loads += resolved // always account database reads, even for failures
+ if err != nil {
+ break
+ }
+ nodes = append(nodes, blob)
+ TotalBytes += uint64(len(blob))
+
+ default:
+ var stRoot common.Hash
+ // Storage slots requested, open the storage trie and retrieve from there
+ if snap == nil {
+ // We don't have the requested state snapshotted yet (or it is stale),
+ // but can look up the account via the trie instead.
+ account, err := accTrie.TryGetAccountByHash(common.BytesToHash(p.Pathset[0]))
+ loads += 8 // We don't know the exact cost of lookup, this is an estimate
+ if err != nil || account == nil {
+ break
+ }
+ stRoot = account.Root
+ } else {
+ account, err := snap.Account(common.BytesToHash(p.Pathset[0]))
+ loads++ // always account database reads, even for failures
+ if err != nil || account == nil {
+ break
+ }
+ stRoot = common.BytesToHash(account.Root)
+ }
+ id := trie.StorageTrieID(root, common.BytesToHash(p.Pathset[0]), stRoot)
+ stTrie, err := trie.NewStateTrie(id, triedb)
+ loads++ // always account database reads, even for failures
+ if err != nil {
+ break
+ }
+ for _, path := range p.Pathset[1:] {
+ blob, resolved, err := stTrie.TryGetNode(path)
+ loads += resolved // always account database reads, even for failures
+ if err != nil {
+ break
+ }
+ nodes = append(nodes, blob)
+ TotalBytes += uint64(len(blob))
+
+ // Sanity check limits to avoid DoS on the store trie loads
+ if TotalBytes > bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+ break
+ }
+ }
+ }
+ // Abort request processing if we've exceeded our limits
+ if TotalBytes > bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {
+ break
+ }
+ }
+ return nodes, nil
+}
diff --git a/p2p/stream/protocols/sync/chain_test.go b/p2p/stream/protocols/sync/chain_test.go
index 8d478e2b3f..414492054c 100644
--- a/p2p/stream/protocols/sync/chain_test.go
+++ b/p2p/stream/protocols/sync/chain_test.go
@@ -6,12 +6,15 @@ import (
"errors"
"fmt"
"math/big"
+ "time"
+ "unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/harmony/block"
"github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)
@@ -60,6 +63,26 @@ func (tch *testChainHelper) getReceipts(hs []common.Hash) ([]types.Receipts, err
return receipts, nil
}
+func (ch *testChainHelper) getAccountRange(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.AccountData, [][]byte, error) {
+ testAccountRanges, testProofs := makeTestAccountRanges(2)
+ return testAccountRanges, testProofs, nil
+}
+
+func (ch *testChainHelper) getStorageRanges(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) ([]*message.StoragesData, [][]byte, error) {
+ testSlots, testProofs := makeTestStorageRanges(2)
+ return testSlots, testProofs, nil
+}
+
+func (ch *testChainHelper) getByteCodes(hs []common.Hash, bytes uint64) ([][]byte, error) {
+ testByteCodes := makeTestByteCodes(2)
+ return testByteCodes, nil
+}
+
+func (ch *testChainHelper) getTrieNodes(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, start time.Time) ([][]byte, error) {
+ testTrieNodes := makeTestTrieNodes(2)
+ return testTrieNodes, nil
+}
+
func checkGetReceiptsResult(b []byte, hs []common.Hash) error {
var msg = &syncpb.Message{}
if err := protobuf.Unmarshal(b, msg); err != nil {
@@ -156,6 +179,57 @@ func makeTestReceipts(n int, nPerBlock int) []*types.Receipt {
return receipts
}
+func makeTestAccountRanges(n int) ([]*message.AccountData, [][]byte) {
+ accounts := make([]*message.AccountData, n)
+ proofs := make([][]byte, n)
+ for i := 0; i < n; i++ {
+ accounts[i] = &message.AccountData{
+ Hash: numberToHash(uint64(i * 2)).Bytes(),
+ Body: numberToHash(uint64(i*2 + 1)).Bytes(),
+ }
+ }
+ for i := 0; i < n; i++ {
+ proofs[i] = numberToHash(uint64(i)).Bytes()
+ }
+ return accounts, proofs
+}
+
+func makeTestStorageRanges(n int) ([]*message.StoragesData, [][]byte) {
+ slots := make([]*message.StoragesData, n)
+ proofs := make([][]byte, n)
+ for i := 0; i < n; i++ {
+ slots[i] = &message.StoragesData{
+ Data: make([]*syncpb.StorageData, 2),
+ }
+ for j := 0; j < 2; j++ {
+ slots[i].Data[j] = &message.StorageData{
+ Hash: numberToHash(uint64(i * 2)).Bytes(),
+ Body: numberToHash(uint64(i*2 + 1)).Bytes(),
+ }
+ }
+ }
+ for i := 0; i < n; i++ {
+ proofs[i] = numberToHash(uint64(i)).Bytes()
+ }
+ return slots, proofs
+}
+
+func makeTestByteCodes(n int) [][]byte {
+ byteCodes := make([][]byte, n)
+ for i := 0; i < n; i++ {
+ byteCodes[i] = numberToHash(uint64(i)).Bytes()
+ }
+ return byteCodes
+}
+
+func makeTestTrieNodes(n int) [][]byte {
+ trieNodes := make([][]byte, n)
+ for i := 0; i < n; i++ {
+ trieNodes[i] = numberToHash(uint64(i)).Bytes()
+ }
+ return trieNodes
+}
+
func decodeBlocksBytes(bbs [][]byte) ([]*types.Block, error) {
blocks := make([]*types.Block, 0, len(bbs))
@@ -169,6 +243,19 @@ func decodeBlocksBytes(bbs [][]byte) ([]*types.Block, error) {
return blocks, nil
}
+func decodeHashBytes(hs [][]byte) ([]common.Hash, error) {
+ hashes := make([]common.Hash, 0)
+
+ for _, h := range hs {
+ var hash common.Hash
+ if err := rlp.DecodeBytes(h, &hash); err != nil {
+ return nil, err
+ }
+ hashes = append(hashes, hash)
+ }
+ return hashes, nil
+}
+
func checkBlockNumberResult(b []byte) error {
var msg = &syncpb.Message{}
if err := protobuf.Unmarshal(b, msg); err != nil {
@@ -230,3 +317,90 @@ func checkBlocksByHashesResult(b []byte, hs []common.Hash) error {
}
return nil
}
+
+func checkAccountRangeResult(bytes uint64, b []byte) error {
+ var msg = &syncpb.Message{}
+ if err := protobuf.Unmarshal(b, msg); err != nil {
+ return err
+ }
+ gbResp, err := msg.GetAccountRangesResponse()
+ if err != nil {
+ return err
+ }
+ if len(gbResp.Accounts) == 0 {
+ return errors.New("nil response from GetAccountRanges")
+ }
+ if len(gbResp.Proof) != len(gbResp.Accounts) {
+ return errors.New("unexpected proofs")
+ }
+ if len(b) > int(bytes) {
+ return errors.New("unexpected data bytes")
+ }
+ return nil
+}
+
+func checkStorageRangesResult(accounts []common.Hash, bytes uint64, b []byte) error {
+ var msg = &syncpb.Message{}
+ if err := protobuf.Unmarshal(b, msg); err != nil {
+ return err
+ }
+ gbResp, err := msg.GetStorageRangesResponse()
+ if err != nil {
+ return err
+ }
+ if len(gbResp.Slots) == 0 {
+ return errors.New("nil response from GetStorageRanges")
+ }
+ if len(gbResp.Slots) != len(gbResp.Proof) {
+ return errors.New("unexpected proofs")
+ }
+ sz := unsafe.Sizeof(gbResp.Slots)
+ if sz > uintptr(bytes) {
+ return errors.New("unexpected slot bytes")
+ }
+ return nil
+}
+
+func checkByteCodesResult(hs []common.Hash, bytes uint64, b []byte) error {
+ var msg = &syncpb.Message{}
+ if err := protobuf.Unmarshal(b, msg); err != nil {
+ return err
+ }
+ gbResp, err := msg.GetByteCodesResponse()
+ if err != nil {
+ return err
+ }
+ if len(gbResp.Codes) == 0 {
+ return errors.New("nil response from GetByteCodes")
+ }
+ if len(gbResp.Codes) != len(hs) {
+ return errors.New("unexpected byte codes")
+ }
+ sz := len(hs) * common.HashLength
+ if sz > int(bytes) {
+ return errors.New("unexpected data bytes")
+ }
+ return nil
+}
+
+func checkTrieNodesResult(hs []common.Hash, bytes uint64, b []byte) error {
+ var msg = &syncpb.Message{}
+ if err := protobuf.Unmarshal(b, msg); err != nil {
+ return err
+ }
+ gbResp, err := msg.GetTrieNodesResponse()
+ if err != nil {
+ return err
+ }
+ if len(gbResp.Nodes) == 0 {
+ return errors.New("nil response from checkGetTrieNodes")
+ }
+ if len(gbResp.Nodes) != len(hs) {
+ return errors.New("unexpected byte codes")
+ }
+ sz := len(hs) * common.HashLength
+ if sz > int(bytes) {
+ return errors.New("unexpected data bytes")
+ }
+ return nil
+}
diff --git a/p2p/stream/protocols/sync/client.go b/p2p/stream/protocols/sync/client.go
index 0b8a2a2fd9..45707e1191 100644
--- a/p2p/stream/protocols/sync/client.go
+++ b/p2p/stream/protocols/sync/client.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
"github.com/harmony-one/harmony/core/types"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
"github.com/pkg/errors"
@@ -181,6 +182,118 @@ func (p *Protocol) GetNodeData(ctx context.Context, hs []common.Hash, opts ...Op
return
}
+// GetAccountRange does getAccountRange through sync stream protocol.
+// returns the accounts along with proofs as result, target stream id, and error
+func (p *Protocol) GetAccountRange(ctx context.Context, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (accounts []*message.AccountData, proof [][]byte, stid sttypes.StreamID, err error) {
+ timer := p.doMetricClientRequest("getAccountRange")
+ defer p.doMetricPostClientRequest("getAccountRange", err, timer)
+
+ if bytes == 0 {
+ err = fmt.Errorf("zero account ranges bytes requested")
+ return
+ }
+ if bytes > softResponseLimit {
+ err = fmt.Errorf("requested bytes exceed limit")
+ return
+ }
+ req := newGetAccountRangeRequest(root, origin, limit, bytes)
+ resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+ if err != nil {
+ return
+ }
+ accounts, proof, err = req.getAccountRangeFromResponse(resp)
+ return
+}
+
+// GetStorageRanges does getStorageRanges through sync stream protocol.
+// returns the slots along with proofs as result, target stream id, and error
+func (p *Protocol) GetStorageRanges(ctx context.Context, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64, opts ...Option) (slots [][]*message.StorageData, proof [][]byte, stid sttypes.StreamID, err error) {
+ timer := p.doMetricClientRequest("getStorageRanges")
+ defer p.doMetricPostClientRequest("getStorageRanges", err, timer)
+
+ if bytes == 0 {
+ err = fmt.Errorf("zero storage ranges bytes requested")
+ return
+ }
+ if bytes > softResponseLimit {
+ err = fmt.Errorf("requested bytes exceed limit")
+ return
+ }
+ if len(accounts) > GetStorageRangesRequestCap {
+ err = fmt.Errorf("number of requested accounts exceed limit")
+ return
+ }
+ req := newGetStorageRangesRequest(root, accounts, origin, limit, bytes)
+ resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+ if err != nil {
+ return
+ }
+ var storages []*message.StoragesData
+ storages, proof, err = req.getStorageRangesFromResponse(resp)
+ if err != nil {
+ return
+ }
+ slots = make([][]*message.StorageData, 0)
+ for _, storage := range storages {
+ slots = append(slots, storage.Data)
+ }
+ return
+}
+
+// GetByteCodes does getByteCodes through sync stream protocol.
+// returns the codes as result, target stream id, and error
+func (p *Protocol) GetByteCodes(ctx context.Context, hs []common.Hash, bytes uint64, opts ...Option) (codes [][]byte, stid sttypes.StreamID, err error) {
+ timer := p.doMetricClientRequest("getByteCodes")
+ defer p.doMetricPostClientRequest("getByteCodes", err, timer)
+
+ if bytes == 0 {
+ err = fmt.Errorf("zero bytecode bytes requested")
+ return
+ }
+ if bytes > softResponseLimit {
+ err = fmt.Errorf("requested bytes exceed limit")
+ return
+ }
+ if len(hs) > GetByteCodesRequestCap {
+ err = fmt.Errorf("number of requested hashes exceed limit")
+ return
+ }
+ req := newGetByteCodesRequest(hs, bytes)
+ resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+ if err != nil {
+ return
+ }
+ codes, err = req.getByteCodesFromResponse(resp)
+ return
+}
+
+// GetTrieNodes does getTrieNodes through sync stream protocol.
+// returns the nodes as result, target stream id, and error
+func (p *Protocol) GetTrieNodes(ctx context.Context, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64, opts ...Option) (nodes [][]byte, stid sttypes.StreamID, err error) {
+ timer := p.doMetricClientRequest("getTrieNodes")
+ defer p.doMetricPostClientRequest("getTrieNodes", err, timer)
+
+ if bytes == 0 {
+ err = fmt.Errorf("zero trie nodes bytes requested")
+ return
+ }
+ if bytes > softResponseLimit {
+ err = fmt.Errorf("requested bytes exceed limit")
+ return
+ }
+ if len(paths) > GetTrieNodesRequestCap {
+ err = fmt.Errorf("number of requested paths exceed limit")
+ return
+ }
+ req := newGetTrieNodesRequest(root, paths, bytes)
+ resp, stid, err := p.rm.DoRequest(ctx, req, opts...)
+ if err != nil {
+ return
+ }
+ nodes, err = req.getTrieNodesFromResponse(resp)
+ return
+}
+
// getBlocksByNumberRequest is the request for get block by numbers which implements
// sttypes.Request interface
type getBlocksByNumberRequest struct {
@@ -571,3 +684,304 @@ func (req *getReceiptsRequest) parseGetReceiptsBytes(resp *syncResponse) ([]type
}
return receipts, nil
}
+
+// getAccountRangeRequest is the request for get account ranges which implements
+// sttypes.Request interface
+type getAccountRangeRequest struct {
+ root common.Hash
+ origin common.Hash
+ limit common.Hash
+ bytes uint64
+ pbReq *syncpb.Request
+}
+
+func newGetAccountRangeRequest(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *getAccountRangeRequest {
+ pbReq := syncpb.MakeGetAccountRangeRequest(root, origin, limit, bytes)
+ return &getAccountRangeRequest{
+ root: root,
+ origin: origin,
+ limit: limit,
+ bytes: bytes,
+ pbReq: pbReq,
+ }
+}
+
+func (req *getAccountRangeRequest) ReqID() uint64 {
+ return req.pbReq.GetReqId()
+}
+
+func (req *getAccountRangeRequest) SetReqID(val uint64) {
+ req.pbReq.ReqId = val
+}
+
+func (req *getAccountRangeRequest) String() string {
+ ss := make([]string, 0, 4)
+ ss = append(ss, req.root.String())
+ ss = append(ss, req.origin.String())
+ ss = append(ss, req.limit.String())
+ ss = append(ss, fmt.Sprint(req.bytes))
+ rqStr := strings.Join(ss, ",")
+ return fmt.Sprintf("REQUEST [GetAccountRange: %s]", rqStr)
+}
+
+func (req *getAccountRangeRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool {
+ return target.Version.GreaterThanOrEqual(MinVersion)
+}
+
+func (req *getAccountRangeRequest) Encode() ([]byte, error) {
+ msg := syncpb.MakeMessageFromRequest(req.pbReq)
+ return protobuf.Marshal(msg)
+}
+
+func (req *getAccountRangeRequest) getAccountRangeFromResponse(resp sttypes.Response) ([]*message.AccountData, [][]byte, error) {
+ sResp, ok := resp.(*syncResponse)
+ if !ok || sResp == nil {
+ return nil, nil, errors.New("not sync response")
+ }
+ return req.parseGetAccountRangeResponse(sResp)
+}
+
+func (req *getAccountRangeRequest) parseGetAccountRangeResponse(resp *syncResponse) ([]*message.AccountData, [][]byte, error) {
+ if errResp := resp.pb.GetErrorResponse(); errResp != nil {
+ return nil, nil, errors.New(errResp.Error)
+ }
+ grResp := resp.pb.GetGetAccountRangeResponse()
+ if grResp == nil {
+ return nil, nil, errors.New("response not GetAccountRange")
+ }
+ proofs := make([][]byte, 0)
+ for _, proofBytes := range grResp.Proof {
+ var proof []byte
+ if err := rlp.DecodeBytes(proofBytes, &proof); err != nil {
+ return nil, nil, errors.Wrap(err, "[GetAccountRangeResponse]")
+ }
+ proofs = append(proofs, proof)
+ }
+ return grResp.Accounts, proofs, nil
+}
+
+// getStorageRangesRequest is the request for get storage ranges which implements
+// sttypes.Request interface
+type getStorageRangesRequest struct {
+ root common.Hash
+ accounts []common.Hash
+ origin common.Hash
+ limit common.Hash
+ bytes uint64
+ pbReq *syncpb.Request
+}
+
+func newGetStorageRangesRequest(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *getStorageRangesRequest {
+ pbReq := syncpb.MakeGetStorageRangesRequest(root, accounts, origin, limit, bytes)
+ return &getStorageRangesRequest{
+ root: root,
+ accounts: accounts,
+ origin: origin,
+ limit: limit,
+ bytes: bytes,
+ pbReq: pbReq,
+ }
+}
+
+func (req *getStorageRangesRequest) ReqID() uint64 {
+ return req.pbReq.GetReqId()
+}
+
+func (req *getStorageRangesRequest) SetReqID(val uint64) {
+ req.pbReq.ReqId = val
+}
+
+func (req *getStorageRangesRequest) String() string {
+ ss := make([]string, 0, 4)
+ ss = append(ss, req.root.String())
+ for _, acc := range req.accounts {
+ ss = append(ss, acc.String())
+ }
+ ss = append(ss, req.origin.String())
+ ss = append(ss, req.limit.String())
+ ss = append(ss, fmt.Sprint(req.bytes))
+ rqStr := strings.Join(ss, ",")
+ return fmt.Sprintf("REQUEST [GetStorageRanges: %s]", rqStr)
+}
+
+func (req *getStorageRangesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool {
+ return target.Version.GreaterThanOrEqual(MinVersion)
+}
+
+func (req *getStorageRangesRequest) Encode() ([]byte, error) {
+ msg := syncpb.MakeMessageFromRequest(req.pbReq)
+ return protobuf.Marshal(msg)
+}
+
+// getStorageRangesFromResponse parses the sync response into storage slot groups and their proofs.
+func (req *getStorageRangesRequest) getStorageRangesFromResponse(resp sttypes.Response) ([]*message.StoragesData, [][]byte, error) {
+ sResp, ok := resp.(*syncResponse)
+ if !ok || sResp == nil {
+ return nil, nil, errors.New("not sync response")
+ }
+ return req.parseGetStorageRangesResponse(sResp)
+}
+
+func (req *getStorageRangesRequest) parseGetStorageRangesResponse(resp *syncResponse) ([]*message.StoragesData, [][]byte, error) {
+ if errResp := resp.pb.GetErrorResponse(); errResp != nil {
+ return nil, nil, errors.New(errResp.Error)
+ }
+ grResp := resp.pb.GetGetStorageRangesResponse()
+ if grResp == nil {
+ return nil, nil, errors.New("response not GetStorageRanges")
+ }
+ proofs := make([][]byte, 0)
+ for _, proofBytes := range grResp.Proof {
+ var proof []byte
+ if err := rlp.DecodeBytes(proofBytes, &proof); err != nil {
+ return nil, nil, errors.Wrap(err, "[GetStorageRangesResponse]")
+ }
+ proofs = append(proofs, proof)
+ }
+ return grResp.Slots, proofs, nil
+}
+
+// getByteCodesRequest is the request for get code bytes which implements
+// sttypes.Request interface
+type getByteCodesRequest struct {
+ hashes []common.Hash
+ bytes uint64
+ pbReq *syncpb.Request
+}
+
+func newGetByteCodesRequest(hashes []common.Hash, bytes uint64) *getByteCodesRequest {
+ pbReq := syncpb.MakeGetByteCodesRequest(hashes, bytes)
+ return &getByteCodesRequest{
+ hashes: hashes,
+ bytes: bytes,
+ pbReq: pbReq,
+ }
+}
+
+func (req *getByteCodesRequest) ReqID() uint64 {
+ return req.pbReq.GetReqId()
+}
+
+func (req *getByteCodesRequest) SetReqID(val uint64) {
+ req.pbReq.ReqId = val
+}
+
+func (req *getByteCodesRequest) String() string {
+ ss := make([]string, 0, 4)
+ for _, h := range req.hashes {
+ ss = append(ss, h.String())
+ }
+ ss = append(ss, fmt.Sprint(req.bytes))
+ rqStr := strings.Join(ss, ",")
+ return fmt.Sprintf("REQUEST [GetByteCodes: %s]", rqStr)
+}
+
+func (req *getByteCodesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool {
+ return target.Version.GreaterThanOrEqual(MinVersion)
+}
+
+func (req *getByteCodesRequest) Encode() ([]byte, error) {
+ msg := syncpb.MakeMessageFromRequest(req.pbReq)
+ return protobuf.Marshal(msg)
+}
+
+func (req *getByteCodesRequest) getByteCodesFromResponse(resp sttypes.Response) ([][]byte, error) {
+ sResp, ok := resp.(*syncResponse)
+ if !ok || sResp == nil {
+ return nil, errors.New("not sync response")
+ }
+ return req.parseGetByteCodesResponse(sResp)
+}
+
+func (req *getByteCodesRequest) parseGetByteCodesResponse(resp *syncResponse) ([][]byte, error) {
+ if errResp := resp.pb.GetErrorResponse(); errResp != nil {
+ return nil, errors.New(errResp.Error)
+ }
+ grResp := resp.pb.GetGetByteCodesResponse()
+ if grResp == nil {
+ return nil, errors.New("response not GetByteCodes")
+ }
+ codes := make([][]byte, 0)
+ for _, codeBytes := range grResp.Codes {
+ var code []byte
+ if err := rlp.DecodeBytes(codeBytes, &code); err != nil {
+ return nil, errors.Wrap(err, "[GetByteCodesResponse]")
+ }
+ codes = append(codes, code)
+ }
+ return codes, nil
+}
+
+// getTrieNodesRequest is the request for get trie nodes which implements
+// sttypes.Request interface
+type getTrieNodesRequest struct {
+ root common.Hash
+ paths []*message.TrieNodePathSet
+ bytes uint64
+ pbReq *syncpb.Request
+}
+
+func newGetTrieNodesRequest(root common.Hash, paths []*message.TrieNodePathSet, bytes uint64) *getTrieNodesRequest {
+ pbReq := syncpb.MakeGetTrieNodesRequest(root, paths, bytes)
+ return &getTrieNodesRequest{
+ root: root,
+ paths: paths,
+ bytes: bytes,
+ pbReq: pbReq,
+ }
+}
+
+func (req *getTrieNodesRequest) ReqID() uint64 {
+ return req.pbReq.GetReqId()
+}
+
+func (req *getTrieNodesRequest) SetReqID(val uint64) {
+ req.pbReq.ReqId = val
+}
+
+func (req *getTrieNodesRequest) String() string {
+ ss := make([]string, 0, 4)
+ ss = append(ss, req.root.String())
+ for _, p := range req.paths {
+ ss = append(ss, p.String())
+ }
+ ss = append(ss, fmt.Sprint(req.bytes))
+ rqStr := strings.Join(ss, ",")
+ return fmt.Sprintf("REQUEST [GetTrieNodes: %s]", rqStr)
+}
+
+func (req *getTrieNodesRequest) IsSupportedByProto(target sttypes.ProtoSpec) bool {
+ return target.Version.GreaterThanOrEqual(MinVersion)
+}
+
+func (req *getTrieNodesRequest) Encode() ([]byte, error) {
+ msg := syncpb.MakeMessageFromRequest(req.pbReq)
+ return protobuf.Marshal(msg)
+}
+
+func (req *getTrieNodesRequest) getTrieNodesFromResponse(resp sttypes.Response) ([][]byte, error) {
+ sResp, ok := resp.(*syncResponse)
+ if !ok || sResp == nil {
+ return nil, errors.New("not sync response")
+ }
+ return req.parseGetTrieNodesResponse(sResp)
+}
+
+func (req *getTrieNodesRequest) parseGetTrieNodesResponse(resp *syncResponse) ([][]byte, error) {
+ if errResp := resp.pb.GetErrorResponse(); errResp != nil {
+ return nil, errors.New(errResp.Error)
+ }
+ grResp := resp.pb.GetGetTrieNodesResponse()
+ if grResp == nil {
+ return nil, errors.New("response not GetTrieNodes")
+ }
+ nodes := make([][]byte, 0)
+ for _, codeBytes := range grResp.Nodes {
+ var code []byte
+ if err := rlp.DecodeBytes(codeBytes, &code); err != nil {
+ return nil, errors.Wrap(err, "[GetTrieNodesResponse]")
+ }
+ nodes = append(nodes, code)
+ }
+ return nodes, nil
+}
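A hypothetical caller sketch (not part of this PR) for the new snap-style client methods added above. It relies only on the signatures visible in this diff; the Protocol value and the state root are assumed to come from the caller's own setup.

```go
package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	protosync "github.com/harmony-one/harmony/p2p/stream/protocols/sync"
)

// fetchAccountRange issues a single GetAccountRange over the full key space
// [0x00..00, 0xff..ff] with a 512 KiB response budget (well under softResponseLimit).
func fetchAccountRange(ctx context.Context, p *protosync.Protocol, root common.Hash) error {
	origin := common.Hash{}
	limit := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")

	accounts, proof, stid, err := p.GetAccountRange(ctx, root, origin, limit, 512*1024)
	if err != nil {
		return err
	}
	fmt.Printf("stream %v returned %d accounts and %d proof nodes\n", stid, len(accounts), len(proof))
	return nil
}
```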
diff --git a/p2p/stream/protocols/sync/client_test.go b/p2p/stream/protocols/sync/client_test.go
index edfa126d0b..611afd7610 100644
--- a/p2p/stream/protocols/sync/client_test.go
+++ b/p2p/stream/protocols/sync/client_test.go
@@ -25,6 +25,8 @@ var (
_ sttypes.Request = &getBlockNumberRequest{}
_ sttypes.Request = &getReceiptsRequest{}
_ sttypes.Response = &syncResponse{&syncpb.Response{}}
+ // MaxHash represents the maximum possible hash value.
+ MaxHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)
var (
@@ -67,6 +69,69 @@ var (
testNodeDataResponse = syncpb.MakeGetNodeDataResponse(0, [][]byte{testNodeDataBytes})
+ account1 = common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")
+ account2 = common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")
+ resAccounts = []common.Hash{account1, account2}
+
+ accountsData = []*message.AccountData{
+ &syncpb.AccountData{
+ Hash: account1[:],
+ Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(),
+ },
+ &syncpb.AccountData{
+ Hash: account2[:],
+ Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(),
+ },
+ }
+
+ slots = []*syncpb.StoragesData{
+ &syncpb.StoragesData{
+ Data: []*syncpb.StorageData{
+ &syncpb.StorageData{
+ Hash: account1[:],
+ Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(),
+ },
+ },
+ },
+ &syncpb.StoragesData{
+ Data: []*syncpb.StorageData{
+ &syncpb.StorageData{
+ Hash: account2[:],
+ Body: common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000").Bytes(),
+ },
+ },
+ },
+ }
+
+ proofBytes1, _ = rlp.EncodeToBytes(account1)
+ proofBytes2, _ = rlp.EncodeToBytes(account2)
+ proof = [][]byte{proofBytes1, proofBytes2}
+
+ codeBytes1, _ = rlp.EncodeToBytes(account1)
+ codeBytes2, _ = rlp.EncodeToBytes(account2)
+ testByteCodes = [][]byte{codeBytes1, codeBytes2}
+ dataNodeBytes1, _ = rlp.EncodeToBytes(numberToHash(1).Bytes())
+ dataNodeBytes2, _ = rlp.EncodeToBytes(numberToHash(2).Bytes())
+ testTrieNodes = [][]byte{dataNodeBytes1, dataNodeBytes2}
+ testPathSet = [][]byte{numberToHash(19850928).Bytes(), numberToHash(13640607).Bytes()}
+
+ testPaths = []*syncpb.TrieNodePathSet{
+ &syncpb.TrieNodePathSet{
+ Pathset: testPathSet,
+ },
+ &syncpb.TrieNodePathSet{
+ Pathset: testPathSet,
+ },
+ }
+
+ testAccountRangeResponse = syncpb.MakeGetAccountRangeResponse(0, accountsData, proof)
+
+ testStorageRangesResponse = syncpb.MakeGetStorageRangesResponse(0, slots, proof)
+
+ testByteCodesResponse = syncpb.MakeGetByteCodesResponse(0, testByteCodes)
+
+ testTrieNodesResponse = syncpb.MakeGetTrieNodesResponse(0, testTrieNodes)
+
testErrorResponse = syncpb.MakeErrorResponse(0, errors.New("test error"))
)
@@ -428,6 +493,267 @@ func TestProtocol_GetNodeData(t *testing.T) {
}
}
+func TestProtocol_GetAccountRange(t *testing.T) {
+ var (
+ root = numberToHash(1985082913640607)
+ ffHash = MaxHash
+ zero = common.Hash{}
+ )
+
+ tests := []struct {
+ getResponse getResponseFn
+ expErr error
+ expStID sttypes.StreamID
+ }{
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testAccountRangeResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: nil,
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testBlockResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("response not GetAccountRange"),
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: nil,
+ expErr: errors.New("get response error"),
+ expStID: "",
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testErrorResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("test error"),
+ expStID: makeTestStreamID(0),
+ },
+ }
+
+ for i, test := range tests {
+ protocol := makeTestProtocol(test.getResponse)
+ accounts, proof, stid, err := protocol.GetAccountRange(context.Background(), root, zero, ffHash, uint64(100))
+
+ if assErr := assertError(err, test.expErr); assErr != nil {
+ t.Errorf("Test %v: %v", i, assErr)
+ continue
+ }
+ if stid != test.expStID {
+ t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+ }
+ if test.expErr == nil {
+ if len(accounts) != len(proof) {
+ t.Errorf("accounts: %v", test.getResponse)
+ t.Errorf("accounts: %v", accounts)
+ t.Errorf("proof: %v", proof)
+ t.Errorf("Test %v: accounts size (%d) not equal to proof size (%d)", i, len(accounts), len(proof))
+ }
+ }
+ }
+}
+
+func TestProtocol_GetStorageRanges(t *testing.T) {
+ var (
+ root = numberToHash(1985082913640607)
+ firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
+ secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
+ testAccounts = []common.Hash{secondKey, firstKey}
+ ffHash = MaxHash
+ zero = common.Hash{}
+ )
+
+ tests := []struct {
+ getResponse getResponseFn
+ expErr error
+ expStID sttypes.StreamID
+ }{
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testStorageRangesResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: nil,
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testBlockResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("response not GetStorageRanges"),
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: nil,
+ expErr: errors.New("get response error"),
+ expStID: "",
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testErrorResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("test error"),
+ expStID: makeTestStreamID(0),
+ },
+ }
+
+ for i, test := range tests {
+ protocol := makeTestProtocol(test.getResponse)
+ slots, proof, stid, err := protocol.GetStorageRanges(context.Background(), root, testAccounts, zero, ffHash, uint64(100))
+
+ if assErr := assertError(err, test.expErr); assErr != nil {
+ t.Errorf("Test %v: %v", i, assErr)
+ continue
+ }
+ if stid != test.expStID {
+ t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+ }
+ if test.expErr == nil {
+ if len(slots) != len(testAccounts) {
+ t.Errorf("Test %v: slots size not equal to accounts size", i)
+ }
+ if len(slots) != len(proof) {
+ t.Errorf("Test %v: account size not equal to proof", i)
+ }
+ }
+ }
+}
+
+func TestProtocol_GetByteCodes(t *testing.T) {
+ tests := []struct {
+ getResponse getResponseFn
+ expErr error
+ expStID sttypes.StreamID
+ }{
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testByteCodesResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: nil,
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testBlockResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("response not GetByteCodes"),
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: nil,
+ expErr: errors.New("get response error"),
+ expStID: "",
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testErrorResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("test error"),
+ expStID: makeTestStreamID(0),
+ },
+ }
+
+ for i, test := range tests {
+ protocol := makeTestProtocol(test.getResponse)
+ codes, stid, err := protocol.GetByteCodes(context.Background(), []common.Hash{numberToHash(19850829)}, uint64(500))
+
+ if assErr := assertError(err, test.expErr); assErr != nil {
+ t.Errorf("Test %v: %v", i, assErr)
+ continue
+ }
+ if stid != test.expStID {
+ t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+ }
+ if test.expErr == nil {
+ if len(codes) != 2 {
+ t.Errorf("Test %v: size not 2", i)
+ }
+ }
+ }
+}
+
+func TestProtocol_GetTrieNodes(t *testing.T) {
+ var (
+ root = numberToHash(1985082913640607)
+ )
+
+ tests := []struct {
+ getResponse getResponseFn
+ expErr error
+ expStID sttypes.StreamID
+ }{
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testTrieNodesResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: nil,
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testBlockResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("response not GetTrieNodes"),
+ expStID: makeTestStreamID(0),
+ },
+ {
+ getResponse: nil,
+ expErr: errors.New("get response error"),
+ expStID: "",
+ },
+ {
+ getResponse: func(request sttypes.Request) (sttypes.Response, sttypes.StreamID) {
+ return &syncResponse{
+ pb: testErrorResponse,
+ }, makeTestStreamID(0)
+ },
+ expErr: errors.New("test error"),
+ expStID: makeTestStreamID(0),
+ },
+ }
+
+ for i, test := range tests {
+ protocol := makeTestProtocol(test.getResponse)
+ nodes, stid, err := protocol.GetTrieNodes(context.Background(), root, testPaths, uint64(500))
+
+ if assErr := assertError(err, test.expErr); assErr != nil {
+ t.Errorf("Test %v: %v", i, assErr)
+ continue
+ }
+ if stid != test.expStID {
+ t.Errorf("Test %v: unexpected st id: %v / %v", i, stid, test.expStID)
+ }
+ if test.expErr == nil {
+ if len(nodes) != 2 {
+ t.Errorf("Test %v: size not 2", i)
+ }
+ }
+ }
+}
+
type getResponseFn func(request sttypes.Request) (sttypes.Response, sttypes.StreamID)
type testHostRequestManager struct {
diff --git a/p2p/stream/protocols/sync/const.go b/p2p/stream/protocols/sync/const.go
index b4cf4410af..d606a46751 100644
--- a/p2p/stream/protocols/sync/const.go
+++ b/p2p/stream/protocols/sync/const.go
@@ -25,6 +25,39 @@ const (
// This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types.
GetReceiptsCap = 128
+ // GetStorageRangesRequestCap is the cap of request of single GetStorageRanges request
+ // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types.
+ GetStorageRangesRequestCap = 256
+
+ // GetByteCodesRequestCap is the cap of request of single GetByteCodes request
+ // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types.
+ GetByteCodesRequestCap = 128
+
+ // GetTrieNodesRequestCap is the cap of request of single GetTrieNodes request
+ // This number has an effect on maxMsgBytes as 20MB defined in github.com/harmony-one/harmony/p2p/stream/types.
+ GetTrieNodesRequestCap = 128
+
+ // stateLookupSlack defines the ratio by which a state response can exceed
+ // the requested limit in order to try and avoid breaking up contracts into
+ // multiple packages and proving them.
+ stateLookupSlack = 0.1
+
+ // softResponseLimit is the target maximum size of replies to data retrievals.
+ softResponseLimit = 2 * 1024 * 1024
+
+ // maxCodeLookups is the maximum number of bytecodes to serve. This number is
+ // there to limit the number of disk lookups.
+ maxCodeLookups = 1024
+
+ // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This
+ // number is there to limit the number of disk lookups.
+ maxTrieNodeLookups = 1024
+
+ // maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.
+ // If we spend too much time, then there is a fairly high chance of timing out
+ // at the remote side, which means all the work is in vain.
+ maxTrieNodeTimeSpent = 5 * time.Second
+
// MaxStreamFailures is the maximum allowed failures before stream gets removed
MaxStreamFailures = 5
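For orientation, here is a minimal, self-contained sketch (not part of this change) of how a responder might combine these limits when serving bytecodes: it stops once the requested byte budget, the soft response limit, or the lookup cap is exhausted. The readCode helper and the sample values are assumptions made up for the example.

package main

import "fmt"

const (
	softResponseLimit = 2 * 1024 * 1024 // target maximum reply size (as in const.go)
	maxCodeLookups    = 1024            // maximum bytecodes served per request (as in const.go)
)

// readCode is a hypothetical stand-in for a bytecode lookup against the state database.
func readCode(hash [32]byte) []byte {
	return []byte{0x60, 0x80, 0x60, 0x40} // placeholder bytecode
}

// serveByteCodes sketches the serving loop implied by the caps: stop once the
// requested byte budget, the soft response limit, or the lookup cap is reached.
func serveByteCodes(hashes [][32]byte, byteLimit uint64) [][]byte {
	if byteLimit > softResponseLimit {
		byteLimit = softResponseLimit
	}
	if len(hashes) > maxCodeLookups {
		hashes = hashes[:maxCodeLookups]
	}
	var (
		codes [][]byte
		total uint64
	)
	for _, h := range hashes {
		code := readCode(h)
		codes = append(codes, code)
		total += uint64(len(code))
		if total > byteLimit {
			break
		}
	}
	return codes
}

func main() {
	served := serveByteCodes([][32]byte{{0x01}, {0x02}}, 500)
	fmt.Println("served", len(served), "bytecodes")
}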
diff --git a/p2p/stream/protocols/sync/message/compose.go b/p2p/stream/protocols/sync/message/compose.go
index 2c0c367098..3be09da5b7 100644
--- a/p2p/stream/protocols/sync/message/compose.go
+++ b/p2p/stream/protocols/sync/message/compose.go
@@ -68,6 +68,60 @@ func MakeGetReceiptsRequest(hashes []common.Hash) *Request {
}
}
+// MakeGetAccountRangeRequest makes the GetAccountRange request
+func MakeGetAccountRangeRequest(root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *Request {
+ return &Request{
+ Request: &Request_GetAccountRangeRequest{
+ GetAccountRangeRequest: &GetAccountRangeRequest{
+ Root: root[:],
+ Origin: origin[:],
+ Limit: limit[:],
+ Bytes: bytes,
+ },
+ },
+ }
+}
+
+// MakeGetStorageRangesRequest makes the GetStorageRanges request
+func MakeGetStorageRangesRequest(root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) *Request {
+ return &Request{
+ Request: &Request_GetStorageRangesRequest{
+ GetStorageRangesRequest: &GetStorageRangesRequest{
+ Root: root[:],
+ Accounts: hashesToBytes(accounts),
+ Origin: origin[:],
+ Limit: limit[:],
+ Bytes: bytes,
+ },
+ },
+ }
+}
+
+// MakeGetByteCodesRequest makes the GetByteCodes request
+func MakeGetByteCodesRequest(hashes []common.Hash, bytes uint64) *Request {
+ return &Request{
+ Request: &Request_GetByteCodesRequest{
+ GetByteCodesRequest: &GetByteCodesRequest{
+ Hashes: hashesToBytes(hashes),
+ Bytes: bytes,
+ },
+ },
+ }
+}
+
+// MakeGetTrieNodesRequest makes the GetTrieNodes request
+func MakeGetTrieNodesRequest(root common.Hash, paths []*TrieNodePathSet, bytes uint64) *Request {
+ return &Request{
+ Request: &Request_GetTrieNodesRequest{
+ GetTrieNodesRequest: &GetTrieNodesRequest{
+ Root: root[:],
+ Paths: paths,
+ Bytes: bytes,
+ },
+ },
+ }
+}
+
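As a hypothetical client-side usage sketch, the constructors above compose into a Message via MakeMessageFromRequest. The root, range bounds, byte budget, and path set below are made-up example values, not part of this change.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

func main() {
	// Hypothetical inputs; in practice they come from the sync scheduler.
	root := common.HexToHash("0x01")
	origin := common.Hash{} // start of the account range
	var limit common.Hash   // end of the account range (all 0xff = whole range)
	for i := range limit {
		limit[i] = 0xff
	}

	// Request up to 500 bytes of accounts under the given state root.
	accReq := message.MakeGetAccountRangeRequest(root, origin, limit, 500)
	fmt.Println(message.MakeMessageFromRequest(accReq))

	// Request trie nodes addressed by a single path set.
	paths := []*message.TrieNodePathSet{{Pathset: [][]byte{{0x01}, {0x02}}}}
	trieReq := message.MakeGetTrieNodesRequest(root, paths, 500)
	fmt.Println(message.MakeMessageFromRequest(trieReq))
}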
// MakeErrorResponse makes the error response
func MakeErrorResponseMessage(rid uint64, err error) *Message {
resp := MakeErrorResponse(rid, err)
@@ -196,6 +250,80 @@ func MakeGetReceiptsResponse(rid uint64, receipts map[uint64]*Receipts) *Respons
}
}
+// MakeGetAccountRangeResponseMessage makes the GetAccountRangeResponse of Message type
+func MakeGetAccountRangeResponseMessage(rid uint64, accounts []*AccountData, proof [][]byte) *Message {
+ resp := MakeGetAccountRangeResponse(rid, accounts, proof)
+ return makeMessageFromResponse(resp)
+}
+
+// MakeGetAccountRangeResponse makes the GetAccountRangeResponse of Response type
+func MakeGetAccountRangeResponse(rid uint64, accounts []*AccountData, proof [][]byte) *Response {
+ return &Response{
+ ReqId: rid,
+ Response: &Response_GetAccountRangeResponse{
+ GetAccountRangeResponse: &GetAccountRangeResponse{
+ Accounts: accounts,
+ Proof: proof,
+ },
+ },
+ }
+}
+
+// MakeGetStorageRangesResponseMessage makes the GetStorageRangesResponse of Message type
+func MakeGetStorageRangesResponseMessage(rid uint64, slots []*StoragesData, proof [][]byte) *Message {
+ resp := MakeGetStorageRangesResponse(rid, slots, proof)
+ return makeMessageFromResponse(resp)
+}
+
+// MakeGetStorageRangesResponse makes the GetStorageRangesResponse of Response type
+func MakeGetStorageRangesResponse(rid uint64, slots []*StoragesData, proof [][]byte) *Response {
+ return &Response{
+ ReqId: rid,
+ Response: &Response_GetStorageRangesResponse{
+ GetStorageRangesResponse: &GetStorageRangesResponse{
+ Slots: slots,
+ Proof: proof,
+ },
+ },
+ }
+}
+
+// MakeGetByteCodesResponseMessage makes the GetByteCodesResponse of Message type
+func MakeGetByteCodesResponseMessage(rid uint64, codes [][]byte) *Message {
+ resp := MakeGetByteCodesResponse(rid, codes)
+ return makeMessageFromResponse(resp)
+}
+
+// MakeGetByteCodesResponse makes the GetByteCodesResponse of Response type
+func MakeGetByteCodesResponse(rid uint64, codes [][]byte) *Response {
+ return &Response{
+ ReqId: rid,
+ Response: &Response_GetByteCodesResponse{
+ GetByteCodesResponse: &GetByteCodesResponse{
+ Codes: codes,
+ },
+ },
+ }
+}
+
+// MakeGetTrieNodesResponseMessage makes the GetTrieNodesResponse of Message type
+func MakeGetTrieNodesResponseMessage(rid uint64, nodes [][]byte) *Message {
+ resp := MakeGetTrieNodesResponse(rid, nodes)
+ return makeMessageFromResponse(resp)
+}
+
+// MakeGetTrieNodesResponse makes the GetTrieNodesResponse of Response type
+func MakeGetTrieNodesResponse(rid uint64, nodes [][]byte) *Response {
+ return &Response{
+ ReqId: rid,
+ Response: &Response_GetTrieNodesResponse{
+ GetTrieNodesResponse: &GetTrieNodesResponse{
+ Nodes: nodes,
+ },
+ },
+ }
+}
+
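On the serving side, the response constructors pair with the request handlers. A minimal, hypothetical sketch follows; the request id and payloads are invented for illustration and are not part of this change.

package main

import (
	"fmt"

	"github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

func main() {
	// Hypothetical request id and payloads; a real handler takes them from the
	// decoded request and the state/code databases.
	var rid uint64 = 7

	// Bytecodes retrieved for a GetByteCodesRequest.
	codesMsg := message.MakeGetByteCodesResponseMessage(rid, [][]byte{{0x60, 0x80}})
	fmt.Println(codesMsg)

	// Trie nodes retrieved for a GetTrieNodesRequest.
	nodesMsg := message.MakeGetTrieNodesResponseMessage(rid, [][]byte{{0x01, 0x02}})
	fmt.Println(nodesMsg)
}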
// MakeMessageFromRequest makes a message from the request
func MakeMessageFromRequest(req *Request) *Message {
return &Message{
diff --git a/p2p/stream/protocols/sync/message/msg.pb.go b/p2p/stream/protocols/sync/message/msg.pb.go
index be7a95995f..db37f7c7da 100644
--- a/p2p/stream/protocols/sync/message/msg.pb.go
+++ b/p2p/stream/protocols/sync/message/msg.pb.go
@@ -115,6 +115,10 @@ type Request struct {
// *Request_GetBlocksByHashesRequest
// *Request_GetNodeDataRequest
// *Request_GetReceiptsRequest
+ // *Request_GetAccountRangeRequest
+ // *Request_GetStorageRangesRequest
+ // *Request_GetByteCodesRequest
+ // *Request_GetTrieNodesRequest
Request isRequest_Request `protobuf_oneof:"request"`
}
@@ -206,6 +210,34 @@ func (x *Request) GetGetReceiptsRequest() *GetReceiptsRequest {
return nil
}
+func (x *Request) GetGetAccountRangeRequest() *GetAccountRangeRequest {
+ if x, ok := x.GetRequest().(*Request_GetAccountRangeRequest); ok {
+ return x.GetAccountRangeRequest
+ }
+ return nil
+}
+
+func (x *Request) GetGetStorageRangesRequest() *GetStorageRangesRequest {
+ if x, ok := x.GetRequest().(*Request_GetStorageRangesRequest); ok {
+ return x.GetStorageRangesRequest
+ }
+ return nil
+}
+
+func (x *Request) GetGetByteCodesRequest() *GetByteCodesRequest {
+ if x, ok := x.GetRequest().(*Request_GetByteCodesRequest); ok {
+ return x.GetByteCodesRequest
+ }
+ return nil
+}
+
+func (x *Request) GetGetTrieNodesRequest() *GetTrieNodesRequest {
+ if x, ok := x.GetRequest().(*Request_GetTrieNodesRequest); ok {
+ return x.GetTrieNodesRequest
+ }
+ return nil
+}
+
type isRequest_Request interface {
isRequest_Request()
}
@@ -234,6 +266,22 @@ type Request_GetReceiptsRequest struct {
GetReceiptsRequest *GetReceiptsRequest `protobuf:"bytes,7,opt,name=get_receipts_request,json=getReceiptsRequest,proto3,oneof"`
}
+type Request_GetAccountRangeRequest struct {
+ GetAccountRangeRequest *GetAccountRangeRequest `protobuf:"bytes,8,opt,name=get_account_range_request,json=getAccountRangeRequest,proto3,oneof"`
+}
+
+type Request_GetStorageRangesRequest struct {
+ GetStorageRangesRequest *GetStorageRangesRequest `protobuf:"bytes,9,opt,name=get_storage_ranges_request,json=getStorageRangesRequest,proto3,oneof"`
+}
+
+type Request_GetByteCodesRequest struct {
+ GetByteCodesRequest *GetByteCodesRequest `protobuf:"bytes,10,opt,name=get_byte_codes_request,json=getByteCodesRequest,proto3,oneof"`
+}
+
+type Request_GetTrieNodesRequest struct {
+ GetTrieNodesRequest *GetTrieNodesRequest `protobuf:"bytes,11,opt,name=get_trie_nodes_request,json=getTrieNodesRequest,proto3,oneof"`
+}
+
func (*Request_GetBlockNumberRequest) isRequest_Request() {}
func (*Request_GetBlockHashesRequest) isRequest_Request() {}
@@ -246,6 +294,14 @@ func (*Request_GetNodeDataRequest) isRequest_Request() {}
func (*Request_GetReceiptsRequest) isRequest_Request() {}
+func (*Request_GetAccountRangeRequest) isRequest_Request() {}
+
+func (*Request_GetStorageRangesRequest) isRequest_Request() {}
+
+func (*Request_GetByteCodesRequest) isRequest_Request() {}
+
+func (*Request_GetTrieNodesRequest) isRequest_Request() {}
+
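A server handler can branch on the request oneof with a type switch over the wrapper types above. The dispatchRequest function below is an illustrative sketch rather than code from this patch; only the new snap-style variants are shown.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	msgpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

// dispatchRequest sketches how a server might branch on the request oneof.
func dispatchRequest(req *msgpb.Request) {
	switch r := req.GetRequest().(type) {
	case *msgpb.Request_GetAccountRangeRequest:
		fmt.Println("account range under root", r.GetAccountRangeRequest.GetRoot())
	case *msgpb.Request_GetByteCodesRequest:
		fmt.Println("bytecodes for", len(r.GetByteCodesRequest.GetHashes()), "hashes")
	case *msgpb.Request_GetTrieNodesRequest:
		fmt.Println("trie nodes for", len(r.GetTrieNodesRequest.GetPaths()), "path sets")
	default:
		fmt.Println("unhandled request type")
	}
}

func main() {
	// Hypothetical request, as a handler would receive it after decoding.
	req := msgpb.MakeGetByteCodesRequest([]common.Hash{{0x01}}, 500)
	dispatchRequest(req)
}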
type GetBlockNumberRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -519,26 +575,19 @@ func (x *GetReceiptsRequest) GetBlockHashes() [][]byte {
return nil
}
-type Response struct {
+type GetAccountRangeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- ReqId uint64 `protobuf:"varint,1,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
- // Types that are assignable to Response:
- //
- // *Response_ErrorResponse
- // *Response_GetBlockNumberResponse
- // *Response_GetBlockHashesResponse
- // *Response_GetBlocksByNumResponse
- // *Response_GetBlocksByHashesResponse
- // *Response_GetNodeDataResponse
- // *Response_GetReceiptsResponse
- Response isResponse_Response `protobuf_oneof:"response"`
+ Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"`
+ Origin []byte `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin,omitempty"`
+ Limit []byte `protobuf:"bytes,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ Bytes uint64 `protobuf:"varint,4,opt,name=bytes,proto3" json:"bytes,omitempty"`
}
-func (x *Response) Reset() {
- *x = Response{}
+func (x *GetAccountRangeRequest) Reset() {
+ *x = GetAccountRangeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -546,13 +595,13 @@ func (x *Response) Reset() {
}
}
-func (x *Response) String() string {
+func (x *GetAccountRangeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Response) ProtoMessage() {}
+func (*GetAccountRangeRequest) ProtoMessage() {}
-func (x *Response) ProtoReflect() protoreflect.Message {
+func (x *GetAccountRangeRequest) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -564,130 +613,53 @@ func (x *Response) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Response.ProtoReflect.Descriptor instead.
-func (*Response) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetAccountRangeRequest.ProtoReflect.Descriptor instead.
+func (*GetAccountRangeRequest) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{8}
}
-func (x *Response) GetReqId() uint64 {
+func (x *GetAccountRangeRequest) GetRoot() []byte {
if x != nil {
- return x.ReqId
- }
- return 0
-}
-
-func (m *Response) GetResponse() isResponse_Response {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (x *Response) GetErrorResponse() *ErrorResponse {
- if x, ok := x.GetResponse().(*Response_ErrorResponse); ok {
- return x.ErrorResponse
- }
- return nil
-}
-
-func (x *Response) GetGetBlockNumberResponse() *GetBlockNumberResponse {
- if x, ok := x.GetResponse().(*Response_GetBlockNumberResponse); ok {
- return x.GetBlockNumberResponse
- }
- return nil
-}
-
-func (x *Response) GetGetBlockHashesResponse() *GetBlockHashesResponse {
- if x, ok := x.GetResponse().(*Response_GetBlockHashesResponse); ok {
- return x.GetBlockHashesResponse
- }
- return nil
-}
-
-func (x *Response) GetGetBlocksByNumResponse() *GetBlocksByNumResponse {
- if x, ok := x.GetResponse().(*Response_GetBlocksByNumResponse); ok {
- return x.GetBlocksByNumResponse
+ return x.Root
}
return nil
}
-func (x *Response) GetGetBlocksByHashesResponse() *GetBlocksByHashesResponse {
- if x, ok := x.GetResponse().(*Response_GetBlocksByHashesResponse); ok {
- return x.GetBlocksByHashesResponse
+func (x *GetAccountRangeRequest) GetOrigin() []byte {
+ if x != nil {
+ return x.Origin
}
return nil
}
-func (x *Response) GetGetNodeDataResponse() *GetNodeDataResponse {
- if x, ok := x.GetResponse().(*Response_GetNodeDataResponse); ok {
- return x.GetNodeDataResponse
+func (x *GetAccountRangeRequest) GetLimit() []byte {
+ if x != nil {
+ return x.Limit
}
return nil
}
-func (x *Response) GetGetReceiptsResponse() *GetReceiptsResponse {
- if x, ok := x.GetResponse().(*Response_GetReceiptsResponse); ok {
- return x.GetReceiptsResponse
+func (x *GetAccountRangeRequest) GetBytes() uint64 {
+ if x != nil {
+ return x.Bytes
}
- return nil
-}
-
-type isResponse_Response interface {
- isResponse_Response()
-}
-
-type Response_ErrorResponse struct {
- ErrorResponse *ErrorResponse `protobuf:"bytes,2,opt,name=error_response,json=errorResponse,proto3,oneof"`
-}
-
-type Response_GetBlockNumberResponse struct {
- GetBlockNumberResponse *GetBlockNumberResponse `protobuf:"bytes,3,opt,name=get_block_number_response,json=getBlockNumberResponse,proto3,oneof"`
-}
-
-type Response_GetBlockHashesResponse struct {
- GetBlockHashesResponse *GetBlockHashesResponse `protobuf:"bytes,4,opt,name=get_block_hashes_response,json=getBlockHashesResponse,proto3,oneof"`
-}
-
-type Response_GetBlocksByNumResponse struct {
- GetBlocksByNumResponse *GetBlocksByNumResponse `protobuf:"bytes,5,opt,name=get_blocks_by_num_response,json=getBlocksByNumResponse,proto3,oneof"`
-}
-
-type Response_GetBlocksByHashesResponse struct {
- GetBlocksByHashesResponse *GetBlocksByHashesResponse `protobuf:"bytes,6,opt,name=get_blocks_by_hashes_response,json=getBlocksByHashesResponse,proto3,oneof"`
-}
-
-type Response_GetNodeDataResponse struct {
- GetNodeDataResponse *GetNodeDataResponse `protobuf:"bytes,7,opt,name=get_node_data_response,json=getNodeDataResponse,proto3,oneof"`
-}
-
-type Response_GetReceiptsResponse struct {
- GetReceiptsResponse *GetReceiptsResponse `protobuf:"bytes,8,opt,name=get_receipts_response,json=getReceiptsResponse,proto3,oneof"`
+ return 0
}
-func (*Response_ErrorResponse) isResponse_Response() {}
-
-func (*Response_GetBlockNumberResponse) isResponse_Response() {}
-
-func (*Response_GetBlockHashesResponse) isResponse_Response() {}
-
-func (*Response_GetBlocksByNumResponse) isResponse_Response() {}
-
-func (*Response_GetBlocksByHashesResponse) isResponse_Response() {}
-
-func (*Response_GetNodeDataResponse) isResponse_Response() {}
-
-func (*Response_GetReceiptsResponse) isResponse_Response() {}
-
-type ErrorResponse struct {
+type GetStorageRangesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+ Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"`
+ Accounts [][]byte `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"`
+ Origin []byte `protobuf:"bytes,3,opt,name=origin,proto3" json:"origin,omitempty"`
+ Limit []byte `protobuf:"bytes,4,opt,name=limit,proto3" json:"limit,omitempty"`
+ Bytes uint64 `protobuf:"varint,5,opt,name=bytes,proto3" json:"bytes,omitempty"`
}
-func (x *ErrorResponse) Reset() {
- *x = ErrorResponse{}
+func (x *GetStorageRangesRequest) Reset() {
+ *x = GetStorageRangesRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -695,13 +667,13 @@ func (x *ErrorResponse) Reset() {
}
}
-func (x *ErrorResponse) String() string {
+func (x *GetStorageRangesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ErrorResponse) ProtoMessage() {}
+func (*GetStorageRangesRequest) ProtoMessage() {}
-func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+func (x *GetStorageRangesRequest) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -713,28 +685,57 @@ func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
-func (*ErrorResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetStorageRangesRequest.ProtoReflect.Descriptor instead.
+func (*GetStorageRangesRequest) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{9}
}
-func (x *ErrorResponse) GetError() string {
+func (x *GetStorageRangesRequest) GetRoot() []byte {
if x != nil {
- return x.Error
+ return x.Root
}
- return ""
+ return nil
}
-type GetBlockNumberResponse struct {
+func (x *GetStorageRangesRequest) GetAccounts() [][]byte {
+ if x != nil {
+ return x.Accounts
+ }
+ return nil
+}
+
+func (x *GetStorageRangesRequest) GetOrigin() []byte {
+ if x != nil {
+ return x.Origin
+ }
+ return nil
+}
+
+func (x *GetStorageRangesRequest) GetLimit() []byte {
+ if x != nil {
+ return x.Limit
+ }
+ return nil
+}
+
+func (x *GetStorageRangesRequest) GetBytes() uint64 {
+ if x != nil {
+ return x.Bytes
+ }
+ return 0
+}
+
+type GetByteCodesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
+ Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+ Bytes uint64 `protobuf:"varint,2,opt,name=bytes,proto3" json:"bytes,omitempty"`
}
-func (x *GetBlockNumberResponse) Reset() {
- *x = GetBlockNumberResponse{}
+func (x *GetByteCodesRequest) Reset() {
+ *x = GetByteCodesRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -742,13 +743,13 @@ func (x *GetBlockNumberResponse) Reset() {
}
}
-func (x *GetBlockNumberResponse) String() string {
+func (x *GetByteCodesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetBlockNumberResponse) ProtoMessage() {}
+func (*GetByteCodesRequest) ProtoMessage() {}
-func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message {
+func (x *GetByteCodesRequest) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -760,28 +761,35 @@ func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetBlockNumberResponse.ProtoReflect.Descriptor instead.
-func (*GetBlockNumberResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetByteCodesRequest.ProtoReflect.Descriptor instead.
+func (*GetByteCodesRequest) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{10}
}
-func (x *GetBlockNumberResponse) GetNumber() uint64 {
+func (x *GetByteCodesRequest) GetHashes() [][]byte {
if x != nil {
- return x.Number
+ return x.Hashes
+ }
+ return nil
+}
+
+func (x *GetByteCodesRequest) GetBytes() uint64 {
+ if x != nil {
+ return x.Bytes
}
return 0
}
-type GetBlockHashesResponse struct {
+type TrieNodePathSet struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+ Pathset [][]byte `protobuf:"bytes,1,rep,name=pathset,proto3" json:"pathset,omitempty"`
}
-func (x *GetBlockHashesResponse) Reset() {
- *x = GetBlockHashesResponse{}
+func (x *TrieNodePathSet) Reset() {
+ *x = TrieNodePathSet{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -789,13 +797,13 @@ func (x *GetBlockHashesResponse) Reset() {
}
}
-func (x *GetBlockHashesResponse) String() string {
+func (x *TrieNodePathSet) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetBlockHashesResponse) ProtoMessage() {}
+func (*TrieNodePathSet) ProtoMessage() {}
-func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message {
+func (x *TrieNodePathSet) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -807,29 +815,30 @@ func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetBlockHashesResponse.ProtoReflect.Descriptor instead.
-func (*GetBlockHashesResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use TrieNodePathSet.ProtoReflect.Descriptor instead.
+func (*TrieNodePathSet) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{11}
}
-func (x *GetBlockHashesResponse) GetHashes() [][]byte {
+func (x *TrieNodePathSet) GetPathset() [][]byte {
if x != nil {
- return x.Hashes
+ return x.Pathset
}
return nil
}
-type GetBlocksByNumResponse struct {
+type GetTrieNodesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"`
- CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"`
+ Root []byte `protobuf:"bytes,1,opt,name=root,proto3" json:"root,omitempty"`
+ Paths []*TrieNodePathSet `protobuf:"bytes,2,rep,name=paths,proto3" json:"paths,omitempty"`
+ Bytes uint64 `protobuf:"varint,3,opt,name=bytes,proto3" json:"bytes,omitempty"`
}
-func (x *GetBlocksByNumResponse) Reset() {
- *x = GetBlocksByNumResponse{}
+func (x *GetTrieNodesRequest) Reset() {
+ *x = GetTrieNodesRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -837,13 +846,13 @@ func (x *GetBlocksByNumResponse) Reset() {
}
}
-func (x *GetBlocksByNumResponse) String() string {
+func (x *GetTrieNodesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetBlocksByNumResponse) ProtoMessage() {}
+func (*GetTrieNodesRequest) ProtoMessage() {}
-func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message {
+func (x *GetTrieNodesRequest) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -855,36 +864,56 @@ func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetBlocksByNumResponse.ProtoReflect.Descriptor instead.
-func (*GetBlocksByNumResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use GetTrieNodesRequest.ProtoReflect.Descriptor instead.
+func (*GetTrieNodesRequest) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{12}
}
-func (x *GetBlocksByNumResponse) GetBlocksBytes() [][]byte {
+func (x *GetTrieNodesRequest) GetRoot() []byte {
if x != nil {
- return x.BlocksBytes
+ return x.Root
}
return nil
}
-func (x *GetBlocksByNumResponse) GetCommitSig() [][]byte {
+func (x *GetTrieNodesRequest) GetPaths() []*TrieNodePathSet {
if x != nil {
- return x.CommitSig
+ return x.Paths
}
return nil
}
-type GetBlocksByHashesResponse struct {
+func (x *GetTrieNodesRequest) GetBytes() uint64 {
+ if x != nil {
+ return x.Bytes
+ }
+ return 0
+}
+
+type Response struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"`
- CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"`
+ ReqId uint64 `protobuf:"varint,1,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"`
+ // Types that are assignable to Response:
+ //
+ // *Response_ErrorResponse
+ // *Response_GetBlockNumberResponse
+ // *Response_GetBlockHashesResponse
+ // *Response_GetBlocksByNumResponse
+ // *Response_GetBlocksByHashesResponse
+ // *Response_GetNodeDataResponse
+ // *Response_GetReceiptsResponse
+ // *Response_GetAccountRangeResponse
+ // *Response_GetStorageRangesResponse
+ // *Response_GetByteCodesResponse
+ // *Response_GetTrieNodesResponse
+ Response isResponse_Response `protobuf_oneof:"response"`
}
-func (x *GetBlocksByHashesResponse) Reset() {
- *x = GetBlocksByHashesResponse{}
+func (x *Response) Reset() {
+ *x = Response{}
if protoimpl.UnsafeEnabled {
mi := &file_msg_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -892,13 +921,13 @@ func (x *GetBlocksByHashesResponse) Reset() {
}
}
-func (x *GetBlocksByHashesResponse) String() string {
+func (x *Response) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetBlocksByHashesResponse) ProtoMessage() {}
+func (*Response) ProtoMessage() {}
-func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message {
+func (x *Response) ProtoReflect() protoreflect.Message {
mi := &file_msg_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -910,50 +939,754 @@ func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetBlocksByHashesResponse.ProtoReflect.Descriptor instead.
-func (*GetBlocksByHashesResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use Response.ProtoReflect.Descriptor instead.
+func (*Response) Descriptor() ([]byte, []int) {
return file_msg_proto_rawDescGZIP(), []int{13}
}
-func (x *GetBlocksByHashesResponse) GetBlocksBytes() [][]byte {
+func (x *Response) GetReqId() uint64 {
+ if x != nil {
+ return x.ReqId
+ }
+ return 0
+}
+
+func (m *Response) GetResponse() isResponse_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (x *Response) GetErrorResponse() *ErrorResponse {
+ if x, ok := x.GetResponse().(*Response_ErrorResponse); ok {
+ return x.ErrorResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetBlockNumberResponse() *GetBlockNumberResponse {
+ if x, ok := x.GetResponse().(*Response_GetBlockNumberResponse); ok {
+ return x.GetBlockNumberResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetBlockHashesResponse() *GetBlockHashesResponse {
+ if x, ok := x.GetResponse().(*Response_GetBlockHashesResponse); ok {
+ return x.GetBlockHashesResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetBlocksByNumResponse() *GetBlocksByNumResponse {
+ if x, ok := x.GetResponse().(*Response_GetBlocksByNumResponse); ok {
+ return x.GetBlocksByNumResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetBlocksByHashesResponse() *GetBlocksByHashesResponse {
+ if x, ok := x.GetResponse().(*Response_GetBlocksByHashesResponse); ok {
+ return x.GetBlocksByHashesResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetNodeDataResponse() *GetNodeDataResponse {
+ if x, ok := x.GetResponse().(*Response_GetNodeDataResponse); ok {
+ return x.GetNodeDataResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetReceiptsResponse() *GetReceiptsResponse {
+ if x, ok := x.GetResponse().(*Response_GetReceiptsResponse); ok {
+ return x.GetReceiptsResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetAccountRangeResponse() *GetAccountRangeResponse {
+ if x, ok := x.GetResponse().(*Response_GetAccountRangeResponse); ok {
+ return x.GetAccountRangeResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetStorageRangesResponse() *GetStorageRangesResponse {
+ if x, ok := x.GetResponse().(*Response_GetStorageRangesResponse); ok {
+ return x.GetStorageRangesResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetByteCodesResponse() *GetByteCodesResponse {
+ if x, ok := x.GetResponse().(*Response_GetByteCodesResponse); ok {
+ return x.GetByteCodesResponse
+ }
+ return nil
+}
+
+func (x *Response) GetGetTrieNodesResponse() *GetTrieNodesResponse {
+ if x, ok := x.GetResponse().(*Response_GetTrieNodesResponse); ok {
+ return x.GetTrieNodesResponse
+ }
+ return nil
+}
+
+type isResponse_Response interface {
+ isResponse_Response()
+}
+
+type Response_ErrorResponse struct {
+ ErrorResponse *ErrorResponse `protobuf:"bytes,2,opt,name=error_response,json=errorResponse,proto3,oneof"`
+}
+
+type Response_GetBlockNumberResponse struct {
+ GetBlockNumberResponse *GetBlockNumberResponse `protobuf:"bytes,3,opt,name=get_block_number_response,json=getBlockNumberResponse,proto3,oneof"`
+}
+
+type Response_GetBlockHashesResponse struct {
+ GetBlockHashesResponse *GetBlockHashesResponse `protobuf:"bytes,4,opt,name=get_block_hashes_response,json=getBlockHashesResponse,proto3,oneof"`
+}
+
+type Response_GetBlocksByNumResponse struct {
+ GetBlocksByNumResponse *GetBlocksByNumResponse `protobuf:"bytes,5,opt,name=get_blocks_by_num_response,json=getBlocksByNumResponse,proto3,oneof"`
+}
+
+type Response_GetBlocksByHashesResponse struct {
+ GetBlocksByHashesResponse *GetBlocksByHashesResponse `protobuf:"bytes,6,opt,name=get_blocks_by_hashes_response,json=getBlocksByHashesResponse,proto3,oneof"`
+}
+
+type Response_GetNodeDataResponse struct {
+ GetNodeDataResponse *GetNodeDataResponse `protobuf:"bytes,7,opt,name=get_node_data_response,json=getNodeDataResponse,proto3,oneof"`
+}
+
+type Response_GetReceiptsResponse struct {
+ GetReceiptsResponse *GetReceiptsResponse `protobuf:"bytes,8,opt,name=get_receipts_response,json=getReceiptsResponse,proto3,oneof"`
+}
+
+type Response_GetAccountRangeResponse struct {
+ GetAccountRangeResponse *GetAccountRangeResponse `protobuf:"bytes,9,opt,name=get_account_range_response,json=getAccountRangeResponse,proto3,oneof"`
+}
+
+type Response_GetStorageRangesResponse struct {
+ GetStorageRangesResponse *GetStorageRangesResponse `protobuf:"bytes,10,opt,name=get_storage_ranges_response,json=getStorageRangesResponse,proto3,oneof"`
+}
+
+type Response_GetByteCodesResponse struct {
+ GetByteCodesResponse *GetByteCodesResponse `protobuf:"bytes,11,opt,name=get_byte_codes_response,json=getByteCodesResponse,proto3,oneof"`
+}
+
+type Response_GetTrieNodesResponse struct {
+ GetTrieNodesResponse *GetTrieNodesResponse `protobuf:"bytes,12,opt,name=get_trie_nodes_response,json=getTrieNodesResponse,proto3,oneof"`
+}
+
+func (*Response_ErrorResponse) isResponse_Response() {}
+
+func (*Response_GetBlockNumberResponse) isResponse_Response() {}
+
+func (*Response_GetBlockHashesResponse) isResponse_Response() {}
+
+func (*Response_GetBlocksByNumResponse) isResponse_Response() {}
+
+func (*Response_GetBlocksByHashesResponse) isResponse_Response() {}
+
+func (*Response_GetNodeDataResponse) isResponse_Response() {}
+
+func (*Response_GetReceiptsResponse) isResponse_Response() {}
+
+func (*Response_GetAccountRangeResponse) isResponse_Response() {}
+
+func (*Response_GetStorageRangesResponse) isResponse_Response() {}
+
+func (*Response_GetByteCodesResponse) isResponse_Response() {}
+
+func (*Response_GetTrieNodesResponse) isResponse_Response() {}
+
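On the receiving side, the generated accessors return nil when the response holds a different variant, so a nil check doubles as a type check. The handleResponse function below is an illustrative sketch, not part of this patch; the sample response is fabricated for the example.

package main

import (
	"fmt"

	msgpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

// handleResponse shows one way a client might unpack the response oneof.
func handleResponse(resp *msgpb.Response) {
	if errResp := resp.GetErrorResponse(); errResp != nil {
		fmt.Println("remote error:", errResp.GetError())
		return
	}
	if bcResp := resp.GetGetByteCodesResponse(); bcResp != nil {
		fmt.Println("got", len(bcResp.GetCodes()), "bytecodes")
		return
	}
	fmt.Println("unexpected payload for request", resp.GetReqId())
}

func main() {
	// Hypothetical response, as a handler would receive it after decoding.
	resp := msgpb.MakeGetByteCodesResponse(7, [][]byte{{0x60, 0x80}})
	handleResponse(resp)
}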
+type ErrorResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+}
+
+func (x *ErrorResponse) Reset() {
+ *x = ErrorResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *ErrorResponse) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type GetBlockNumberResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Number uint64 `protobuf:"varint,1,opt,name=number,proto3" json:"number,omitempty"`
+}
+
+func (x *GetBlockNumberResponse) Reset() {
+ *x = GetBlockNumberResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlockNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlockNumberResponse) ProtoMessage() {}
+
+func (x *GetBlockNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlockNumberResponse.ProtoReflect.Descriptor instead.
+func (*GetBlockNumberResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *GetBlockNumberResponse) GetNumber() uint64 {
+ if x != nil {
+ return x.Number
+ }
+ return 0
+}
+
+type GetBlockHashesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hashes [][]byte `protobuf:"bytes,1,rep,name=hashes,proto3" json:"hashes,omitempty"`
+}
+
+func (x *GetBlockHashesResponse) Reset() {
+ *x = GetBlockHashesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlockHashesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlockHashesResponse) ProtoMessage() {}
+
+func (x *GetBlockHashesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlockHashesResponse.ProtoReflect.Descriptor instead.
+func (*GetBlockHashesResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{16}
+}
+
+func (x *GetBlockHashesResponse) GetHashes() [][]byte {
+ if x != nil {
+ return x.Hashes
+ }
+ return nil
+}
+
+type GetBlocksByNumResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"`
+ CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"`
+}
+
+func (x *GetBlocksByNumResponse) Reset() {
+ *x = GetBlocksByNumResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlocksByNumResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlocksByNumResponse) ProtoMessage() {}
+
+func (x *GetBlocksByNumResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlocksByNumResponse.ProtoReflect.Descriptor instead.
+func (*GetBlocksByNumResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *GetBlocksByNumResponse) GetBlocksBytes() [][]byte {
+ if x != nil {
+ return x.BlocksBytes
+ }
+ return nil
+}
+
+func (x *GetBlocksByNumResponse) GetCommitSig() [][]byte {
+ if x != nil {
+ return x.CommitSig
+ }
+ return nil
+}
+
+type GetBlocksByHashesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ BlocksBytes [][]byte `protobuf:"bytes,1,rep,name=blocks_bytes,json=blocksBytes,proto3" json:"blocks_bytes,omitempty"`
+ CommitSig [][]byte `protobuf:"bytes,2,rep,name=commit_sig,json=commitSig,proto3" json:"commit_sig,omitempty"`
+}
+
+func (x *GetBlocksByHashesResponse) Reset() {
+ *x = GetBlocksByHashesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetBlocksByHashesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetBlocksByHashesResponse) ProtoMessage() {}
+
+func (x *GetBlocksByHashesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetBlocksByHashesResponse.ProtoReflect.Descriptor instead.
+func (*GetBlocksByHashesResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{18}
+}
+
+func (x *GetBlocksByHashesResponse) GetBlocksBytes() [][]byte {
if x != nil {
return x.BlocksBytes
}
return nil
}
-func (x *GetBlocksByHashesResponse) GetCommitSig() [][]byte {
+func (x *GetBlocksByHashesResponse) GetCommitSig() [][]byte {
+ if x != nil {
+ return x.CommitSig
+ }
+ return nil
+}
+
+type GetNodeDataResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DataBytes [][]byte `protobuf:"bytes,1,rep,name=data_bytes,json=dataBytes,proto3" json:"data_bytes,omitempty"`
+}
+
+func (x *GetNodeDataResponse) Reset() {
+ *x = GetNodeDataResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetNodeDataResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNodeDataResponse) ProtoMessage() {}
+
+func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNodeDataResponse.ProtoReflect.Descriptor instead.
+func (*GetNodeDataResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *GetNodeDataResponse) GetDataBytes() [][]byte {
+ if x != nil {
+ return x.DataBytes
+ }
+ return nil
+}
+
+type Receipts struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReceiptBytes [][]byte `protobuf:"bytes,1,rep,name=receipt_bytes,json=receiptBytes,proto3" json:"receipt_bytes,omitempty"`
+}
+
+func (x *Receipts) Reset() {
+ *x = Receipts{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Receipts) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Receipts) ProtoMessage() {}
+
+func (x *Receipts) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Receipts.ProtoReflect.Descriptor instead.
+func (*Receipts) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *Receipts) GetReceiptBytes() [][]byte {
+ if x != nil {
+ return x.ReceiptBytes
+ }
+ return nil
+}
+
+type GetReceiptsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Receipts map[uint64]*Receipts `protobuf:"bytes,1,rep,name=receipts,proto3" json:"receipts,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GetReceiptsResponse) Reset() {
+ *x = GetReceiptsResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetReceiptsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetReceiptsResponse) ProtoMessage() {}
+
+func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetReceiptsResponse.ProtoReflect.Descriptor instead.
+func (*GetReceiptsResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *GetReceiptsResponse) GetReceipts() map[uint64]*Receipts {
+ if x != nil {
+ return x.Receipts
+ }
+ return nil
+}
+
+type AccountData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+ Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
+}
+
+func (x *AccountData) Reset() {
+ *x = AccountData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccountData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccountData) ProtoMessage() {}
+
+func (x *AccountData) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccountData.ProtoReflect.Descriptor instead.
+func (*AccountData) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *AccountData) GetHash() []byte {
+ if x != nil {
+ return x.Hash
+ }
+ return nil
+}
+
+func (x *AccountData) GetBody() []byte {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+type GetAccountRangeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Accounts []*AccountData `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts,omitempty"`
+ Proof [][]byte `protobuf:"bytes,2,rep,name=proof,proto3" json:"proof,omitempty"`
+}
+
+func (x *GetAccountRangeResponse) Reset() {
+ *x = GetAccountRangeResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetAccountRangeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAccountRangeResponse) ProtoMessage() {}
+
+func (x *GetAccountRangeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[23]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAccountRangeResponse.ProtoReflect.Descriptor instead.
+func (*GetAccountRangeResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *GetAccountRangeResponse) GetAccounts() []*AccountData {
+ if x != nil {
+ return x.Accounts
+ }
+ return nil
+}
+
+func (x *GetAccountRangeResponse) GetProof() [][]byte {
+ if x != nil {
+ return x.Proof
+ }
+ return nil
+}
+
+type StorageData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
+ Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"`
+}
+
+func (x *StorageData) Reset() {
+ *x = StorageData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StorageData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StorageData) ProtoMessage() {}
+
+func (x *StorageData) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[24]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StorageData.ProtoReflect.Descriptor instead.
+func (*StorageData) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *StorageData) GetHash() []byte {
+ if x != nil {
+ return x.Hash
+ }
+ return nil
+}
+
+func (x *StorageData) GetBody() []byte {
if x != nil {
- return x.CommitSig
+ return x.Body
}
return nil
}
-type GetNodeDataResponse struct {
+type StoragesData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- DataBytes [][]byte `protobuf:"bytes,1,rep,name=data_bytes,json=dataBytes,proto3" json:"data_bytes,omitempty"`
+ Data []*StorageData `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty"`
}
-func (x *GetNodeDataResponse) Reset() {
- *x = GetNodeDataResponse{}
+func (x *StoragesData) Reset() {
+ *x = StoragesData{}
if protoimpl.UnsafeEnabled {
- mi := &file_msg_proto_msgTypes[14]
+ mi := &file_msg_proto_msgTypes[25]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetNodeDataResponse) String() string {
+func (x *StoragesData) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetNodeDataResponse) ProtoMessage() {}
+func (*StoragesData) ProtoMessage() {}
-func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message {
- mi := &file_msg_proto_msgTypes[14]
+func (x *StoragesData) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[25]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -964,43 +1697,44 @@ func (x *GetNodeDataResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetNodeDataResponse.ProtoReflect.Descriptor instead.
-func (*GetNodeDataResponse) Descriptor() ([]byte, []int) {
- return file_msg_proto_rawDescGZIP(), []int{14}
+// Deprecated: Use StoragesData.ProtoReflect.Descriptor instead.
+func (*StoragesData) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{25}
}
-func (x *GetNodeDataResponse) GetDataBytes() [][]byte {
+func (x *StoragesData) GetData() []*StorageData {
if x != nil {
- return x.DataBytes
+ return x.Data
}
return nil
}
-type Receipts struct {
+type GetStorageRangesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- ReceiptBytes [][]byte `protobuf:"bytes,1,rep,name=receipt_bytes,json=receiptBytes,proto3" json:"receipt_bytes,omitempty"`
+ Slots []*StoragesData `protobuf:"bytes,1,rep,name=slots,proto3" json:"slots,omitempty"`
+ Proof [][]byte `protobuf:"bytes,2,rep,name=proof,proto3" json:"proof,omitempty"`
}
-func (x *Receipts) Reset() {
- *x = Receipts{}
+func (x *GetStorageRangesResponse) Reset() {
+ *x = GetStorageRangesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_msg_proto_msgTypes[15]
+ mi := &file_msg_proto_msgTypes[26]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *Receipts) String() string {
+func (x *GetStorageRangesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*Receipts) ProtoMessage() {}
+func (*GetStorageRangesResponse) ProtoMessage() {}
-func (x *Receipts) ProtoReflect() protoreflect.Message {
- mi := &file_msg_proto_msgTypes[15]
+func (x *GetStorageRangesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[26]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1011,43 +1745,50 @@ func (x *Receipts) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use Receipts.ProtoReflect.Descriptor instead.
-func (*Receipts) Descriptor() ([]byte, []int) {
- return file_msg_proto_rawDescGZIP(), []int{15}
+// Deprecated: Use GetStorageRangesResponse.ProtoReflect.Descriptor instead.
+func (*GetStorageRangesResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{26}
}
-func (x *Receipts) GetReceiptBytes() [][]byte {
+func (x *GetStorageRangesResponse) GetSlots() []*StoragesData {
if x != nil {
- return x.ReceiptBytes
+ return x.Slots
}
return nil
}
-type GetReceiptsResponse struct {
+func (x *GetStorageRangesResponse) GetProof() [][]byte {
+ if x != nil {
+ return x.Proof
+ }
+ return nil
+}
+
+type GetByteCodesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Receipts map[uint64]*Receipts `protobuf:"bytes,1,rep,name=receipts,proto3" json:"receipts,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Codes [][]byte `protobuf:"bytes,1,rep,name=codes,proto3" json:"codes,omitempty"`
}
-func (x *GetReceiptsResponse) Reset() {
- *x = GetReceiptsResponse{}
+func (x *GetByteCodesResponse) Reset() {
+ *x = GetByteCodesResponse{}
if protoimpl.UnsafeEnabled {
- mi := &file_msg_proto_msgTypes[16]
+ mi := &file_msg_proto_msgTypes[27]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
-func (x *GetReceiptsResponse) String() string {
+func (x *GetByteCodesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*GetReceiptsResponse) ProtoMessage() {}
+func (*GetByteCodesResponse) ProtoMessage() {}
-func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_msg_proto_msgTypes[16]
+func (x *GetByteCodesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[27]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1058,14 +1799,61 @@ func (x *GetReceiptsResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use GetReceiptsResponse.ProtoReflect.Descriptor instead.
-func (*GetReceiptsResponse) Descriptor() ([]byte, []int) {
- return file_msg_proto_rawDescGZIP(), []int{16}
+// Deprecated: Use GetByteCodesResponse.ProtoReflect.Descriptor instead.
+func (*GetByteCodesResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{27}
}
-func (x *GetReceiptsResponse) GetReceipts() map[uint64]*Receipts {
+func (x *GetByteCodesResponse) GetCodes() [][]byte {
if x != nil {
- return x.Receipts
+ return x.Codes
+ }
+ return nil
+}
+
+type GetTrieNodesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Nodes [][]byte `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
+}
+
+func (x *GetTrieNodesResponse) Reset() {
+ *x = GetTrieNodesResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_msg_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GetTrieNodesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetTrieNodesResponse) ProtoMessage() {}
+
+func (x *GetTrieNodesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_msg_proto_msgTypes[28]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetTrieNodesResponse.ProtoReflect.Descriptor instead.
+func (*GetTrieNodesResponse) Descriptor() ([]byte, []int) {
+ return file_msg_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *GetTrieNodesResponse) GetNodes() [][]byte {
+ if x != nil {
+ return x.Nodes
}
return nil
}
@@ -1084,7 +1872,7 @@ var file_msg_proto_rawDesc = []byte{
0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79,
0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x04, 0x72, 0x65, 0x73, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x72,
- 0x65, 0x71, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x22, 0xbd, 0x05, 0x0a, 0x07, 0x52,
+ 0x65, 0x71, 0x5f, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x22, 0xf6, 0x08, 0x0a, 0x07, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x6d, 0x0a,
0x18, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65,
@@ -1127,119 +1915,239 @@ var file_msg_proto_rawDesc = []byte{
0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70,
0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x12, 0x67, 0x65, 0x74,
- 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42,
- 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65,
- 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48,
- 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04,
- 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04,
- 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x2f, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
- 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
- 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52,
- 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x3d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63,
- 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61,
- 0x73, 0x68, 0x65, 0x73, 0x22, 0x35, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44,
- 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f,
- 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52,
- 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x12, 0x47,
- 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61,
- 0x73, 0x68, 0x65, 0x73, 0x22, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x0e, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0d,
- 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x70, 0x0a,
- 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65,
- 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47,
- 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63,
- 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73,
- 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72,
0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x71, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f,
- 0x62, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e,
+ 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x41, 0x63,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x73, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18,
+ 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e,
0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e,
- 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65,
- 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63,
- 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x68, 0x61,
- 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e,
- 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f,
+ 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x67,
+ 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x79,
+ 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79,
+ 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x42,
+ 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65,
+ 0x73, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
+ 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65,
+ 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e,
+ 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x15,
+ 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x2f, 0x0a,
+ 0x15, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x04, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x75, 0x6d, 0x73, 0x22, 0x3d,
+ 0x0a, 0x18, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73,
+ 0x68, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x35, 0x0a,
+ 0x12, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x68, 0x61, 0x73, 0x68,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x48, 0x61,
+ 0x73, 0x68, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69,
+ 0x70, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x70, 0x0a,
+ 0x16, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f,
+ 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x72, 0x69,
+ 0x67, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22,
+ 0x8d, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61,
+ 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72,
+ 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12,
+ 0x1a, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f,
+ 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x72, 0x69,
+ 0x67, 0x69, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22,
+ 0x43, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x12, 0x14,
+ 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65,
+ 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x68, 0x73,
+ 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x74, 0x68, 0x73, 0x65,
+ 0x74, 0x22, 0x83, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64,
+ 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x42, 0x0a,
+ 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x68,
+ 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79,
+ 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x54, 0x72, 0x69, 0x65, 0x4e,
+ 0x6f, 0x64, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x65, 0x74, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68,
+ 0x73, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0xeb, 0x09, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 0x53, 0x0a, 0x0e, 0x65,
+ 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48,
+ 0x00, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75,
+ 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x70, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
+ 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65,
+ 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f,
+ 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
+ 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52,
+ 0x16, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x1d, 0x67, 0x65, 0x74, 0x5f, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36,
+ 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x19, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x19, 0x67, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
- 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x64, 0x61, 0x74,
- 0x61, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47,
+ 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x16, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74,
+ 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65,
+ 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15,
+ 0x67, 0x65, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61,
+ 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e,
+ 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63,
+ 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52,
+ 0x13, 0x67, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x73, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f,
+ 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00,
+ 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x76, 0x0a, 0x1b, 0x67, 0x65, 0x74,
+ 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35,
+ 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x18, 0x67, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x6a, 0x0a, 0x17, 0x67, 0x65, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x63, 0x6f,
+ 0x64, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65,
+ 0x43, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a,
+ 0x17, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x72, 0x69, 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74,
+ 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x48, 0x00, 0x52, 0x14, 0x67, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x16,
+ 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x30,
+ 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73,
+ 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e,
+ 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c,
+ 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x22, 0x5d, 0x0a, 0x19,
+ 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f,
+ 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52,
+ 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c,
+ 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x22, 0x34, 0x0a, 0x13, 0x47,
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61, 0x74,
- 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x15, 0x67, 0x65, 0x74,
- 0x5f, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f,
+ 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x42, 0x79, 0x74, 0x65,
+ 0x73, 0x22, 0x2f, 0x0a, 0x08, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x12, 0x23, 0x0a,
+ 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x42, 0x79, 0x74,
+ 0x65, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70,
+ 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x08, 0x72, 0x65,
+ 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68,
+ 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79,
+ 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65,
+ 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52,
+ 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65,
+ 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x1a, 0x62, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70,
+ 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f,
0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70,
- 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65,
- 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x0a,
- 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
- 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16,
- 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06,
- 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f,
- 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x16, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x5a, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42, 0x79, 0x4e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74,
- 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73,
- 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f,
- 0x73, 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69,
- 0x74, 0x53, 0x69, 0x67, 0x22, 0x5d, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b,
- 0x73, 0x42, 0x79, 0x48, 0x61, 0x73, 0x68, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x42,
- 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73,
- 0x69, 0x67, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
- 0x53, 0x69, 0x67, 0x22, 0x34, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x44, 0x61,
- 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61,
- 0x74, 0x61, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09,
- 0x64, 0x61, 0x74, 0x61, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x08, 0x52, 0x65, 0x63,
- 0x65, 0x69, 0x70, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65,
- 0x63, 0x65, 0x69, 0x70, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xd5, 0x01, 0x0a, 0x13, 0x47,
- 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x08, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x1a, 0x62,
- 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
- 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65,
- 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x25, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61,
- 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52,
- 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x42, 0x0c, 0x5a, 0x0a, 0x2e, 0x2f, 0x3b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x73, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x35, 0x0a, 0x0b, 0x41, 0x63,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a,
+ 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64,
+ 0x79, 0x22, 0x75, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x08,
+ 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e,
+ 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x63, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0x35, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x62,
+ 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x22,
+ 0x4c, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12,
+ 0x3c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e,
+ 0x68, 0x61, 0x72, 0x6d, 0x6f, 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73,
+ 0x79, 0x6e, 0x63, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x71, 0x0a,
+ 0x18, 0x47, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x6c, 0x6f,
+ 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x68, 0x61, 0x72, 0x6d, 0x6f,
+ 0x6e, 0x79, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x2e, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x73, 0x44,
+ 0x61, 0x74, 0x61, 0x52, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x72,
+ 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x6f, 0x66,
+ 0x22, 0x2c, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x42, 0x79, 0x74, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x73,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x64, 0x65,
+ 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x2c,
+ 0x0a, 0x14, 0x47, 0x65, 0x74, 0x54, 0x72, 0x69, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x0c, 0x5a, 0x0a,
+ 0x2e, 0x2f, 0x3b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
@@ -1254,7 +2162,7 @@ func file_msg_proto_rawDescGZIP() []byte {
return file_msg_proto_rawDescData
}
-var file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 18)
+var file_msg_proto_msgTypes = make([]protoimpl.MessageInfo, 30)
var file_msg_proto_goTypes = []interface{}{
(*Message)(nil), // 0: harmony.stream.sync.message.Message
(*Request)(nil), // 1: harmony.stream.sync.message.Request
@@ -1264,40 +2172,64 @@ var file_msg_proto_goTypes = []interface{}{
(*GetBlocksByHashesRequest)(nil), // 5: harmony.stream.sync.message.GetBlocksByHashesRequest
(*GetNodeDataRequest)(nil), // 6: harmony.stream.sync.message.GetNodeDataRequest
(*GetReceiptsRequest)(nil), // 7: harmony.stream.sync.message.GetReceiptsRequest
- (*Response)(nil), // 8: harmony.stream.sync.message.Response
- (*ErrorResponse)(nil), // 9: harmony.stream.sync.message.ErrorResponse
- (*GetBlockNumberResponse)(nil), // 10: harmony.stream.sync.message.GetBlockNumberResponse
- (*GetBlockHashesResponse)(nil), // 11: harmony.stream.sync.message.GetBlockHashesResponse
- (*GetBlocksByNumResponse)(nil), // 12: harmony.stream.sync.message.GetBlocksByNumResponse
- (*GetBlocksByHashesResponse)(nil), // 13: harmony.stream.sync.message.GetBlocksByHashesResponse
- (*GetNodeDataResponse)(nil), // 14: harmony.stream.sync.message.GetNodeDataResponse
- (*Receipts)(nil), // 15: harmony.stream.sync.message.Receipts
- (*GetReceiptsResponse)(nil), // 16: harmony.stream.sync.message.GetReceiptsResponse
- nil, // 17: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry
+ (*GetAccountRangeRequest)(nil), // 8: harmony.stream.sync.message.GetAccountRangeRequest
+ (*GetStorageRangesRequest)(nil), // 9: harmony.stream.sync.message.GetStorageRangesRequest
+ (*GetByteCodesRequest)(nil), // 10: harmony.stream.sync.message.GetByteCodesRequest
+ (*TrieNodePathSet)(nil), // 11: harmony.stream.sync.message.TrieNodePathSet
+ (*GetTrieNodesRequest)(nil), // 12: harmony.stream.sync.message.GetTrieNodesRequest
+ (*Response)(nil), // 13: harmony.stream.sync.message.Response
+ (*ErrorResponse)(nil), // 14: harmony.stream.sync.message.ErrorResponse
+ (*GetBlockNumberResponse)(nil), // 15: harmony.stream.sync.message.GetBlockNumberResponse
+ (*GetBlockHashesResponse)(nil), // 16: harmony.stream.sync.message.GetBlockHashesResponse
+ (*GetBlocksByNumResponse)(nil), // 17: harmony.stream.sync.message.GetBlocksByNumResponse
+ (*GetBlocksByHashesResponse)(nil), // 18: harmony.stream.sync.message.GetBlocksByHashesResponse
+ (*GetNodeDataResponse)(nil), // 19: harmony.stream.sync.message.GetNodeDataResponse
+ (*Receipts)(nil), // 20: harmony.stream.sync.message.Receipts
+ (*GetReceiptsResponse)(nil), // 21: harmony.stream.sync.message.GetReceiptsResponse
+ (*AccountData)(nil), // 22: harmony.stream.sync.message.AccountData
+ (*GetAccountRangeResponse)(nil), // 23: harmony.stream.sync.message.GetAccountRangeResponse
+ (*StorageData)(nil), // 24: harmony.stream.sync.message.StorageData
+ (*StoragesData)(nil), // 25: harmony.stream.sync.message.StoragesData
+ (*GetStorageRangesResponse)(nil), // 26: harmony.stream.sync.message.GetStorageRangesResponse
+ (*GetByteCodesResponse)(nil), // 27: harmony.stream.sync.message.GetByteCodesResponse
+ (*GetTrieNodesResponse)(nil), // 28: harmony.stream.sync.message.GetTrieNodesResponse
+ nil, // 29: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry
}
var file_msg_proto_depIdxs = []int32{
1, // 0: harmony.stream.sync.message.Message.req:type_name -> harmony.stream.sync.message.Request
- 8, // 1: harmony.stream.sync.message.Message.resp:type_name -> harmony.stream.sync.message.Response
+ 13, // 1: harmony.stream.sync.message.Message.resp:type_name -> harmony.stream.sync.message.Response
2, // 2: harmony.stream.sync.message.Request.get_block_number_request:type_name -> harmony.stream.sync.message.GetBlockNumberRequest
3, // 3: harmony.stream.sync.message.Request.get_block_hashes_request:type_name -> harmony.stream.sync.message.GetBlockHashesRequest
4, // 4: harmony.stream.sync.message.Request.get_blocks_by_num_request:type_name -> harmony.stream.sync.message.GetBlocksByNumRequest
5, // 5: harmony.stream.sync.message.Request.get_blocks_by_hashes_request:type_name -> harmony.stream.sync.message.GetBlocksByHashesRequest
6, // 6: harmony.stream.sync.message.Request.get_node_data_request:type_name -> harmony.stream.sync.message.GetNodeDataRequest
7, // 7: harmony.stream.sync.message.Request.get_receipts_request:type_name -> harmony.stream.sync.message.GetReceiptsRequest
- 9, // 8: harmony.stream.sync.message.Response.error_response:type_name -> harmony.stream.sync.message.ErrorResponse
- 10, // 9: harmony.stream.sync.message.Response.get_block_number_response:type_name -> harmony.stream.sync.message.GetBlockNumberResponse
- 11, // 10: harmony.stream.sync.message.Response.get_block_hashes_response:type_name -> harmony.stream.sync.message.GetBlockHashesResponse
- 12, // 11: harmony.stream.sync.message.Response.get_blocks_by_num_response:type_name -> harmony.stream.sync.message.GetBlocksByNumResponse
- 13, // 12: harmony.stream.sync.message.Response.get_blocks_by_hashes_response:type_name -> harmony.stream.sync.message.GetBlocksByHashesResponse
- 14, // 13: harmony.stream.sync.message.Response.get_node_data_response:type_name -> harmony.stream.sync.message.GetNodeDataResponse
- 16, // 14: harmony.stream.sync.message.Response.get_receipts_response:type_name -> harmony.stream.sync.message.GetReceiptsResponse
- 17, // 15: harmony.stream.sync.message.GetReceiptsResponse.receipts:type_name -> harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry
- 15, // 16: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry.value:type_name -> harmony.stream.sync.message.Receipts
- 17, // [17:17] is the sub-list for method output_type
- 17, // [17:17] is the sub-list for method input_type
- 17, // [17:17] is the sub-list for extension type_name
- 17, // [17:17] is the sub-list for extension extendee
- 0, // [0:17] is the sub-list for field type_name
+ 8, // 8: harmony.stream.sync.message.Request.get_account_range_request:type_name -> harmony.stream.sync.message.GetAccountRangeRequest
+ 9, // 9: harmony.stream.sync.message.Request.get_storage_ranges_request:type_name -> harmony.stream.sync.message.GetStorageRangesRequest
+ 10, // 10: harmony.stream.sync.message.Request.get_byte_codes_request:type_name -> harmony.stream.sync.message.GetByteCodesRequest
+ 12, // 11: harmony.stream.sync.message.Request.get_trie_nodes_request:type_name -> harmony.stream.sync.message.GetTrieNodesRequest
+ 11, // 12: harmony.stream.sync.message.GetTrieNodesRequest.paths:type_name -> harmony.stream.sync.message.TrieNodePathSet
+ 14, // 13: harmony.stream.sync.message.Response.error_response:type_name -> harmony.stream.sync.message.ErrorResponse
+ 15, // 14: harmony.stream.sync.message.Response.get_block_number_response:type_name -> harmony.stream.sync.message.GetBlockNumberResponse
+ 16, // 15: harmony.stream.sync.message.Response.get_block_hashes_response:type_name -> harmony.stream.sync.message.GetBlockHashesResponse
+ 17, // 16: harmony.stream.sync.message.Response.get_blocks_by_num_response:type_name -> harmony.stream.sync.message.GetBlocksByNumResponse
+ 18, // 17: harmony.stream.sync.message.Response.get_blocks_by_hashes_response:type_name -> harmony.stream.sync.message.GetBlocksByHashesResponse
+ 19, // 18: harmony.stream.sync.message.Response.get_node_data_response:type_name -> harmony.stream.sync.message.GetNodeDataResponse
+ 21, // 19: harmony.stream.sync.message.Response.get_receipts_response:type_name -> harmony.stream.sync.message.GetReceiptsResponse
+ 23, // 20: harmony.stream.sync.message.Response.get_account_range_response:type_name -> harmony.stream.sync.message.GetAccountRangeResponse
+ 26, // 21: harmony.stream.sync.message.Response.get_storage_ranges_response:type_name -> harmony.stream.sync.message.GetStorageRangesResponse
+ 27, // 22: harmony.stream.sync.message.Response.get_byte_codes_response:type_name -> harmony.stream.sync.message.GetByteCodesResponse
+ 28, // 23: harmony.stream.sync.message.Response.get_trie_nodes_response:type_name -> harmony.stream.sync.message.GetTrieNodesResponse
+ 29, // 24: harmony.stream.sync.message.GetReceiptsResponse.receipts:type_name -> harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry
+ 22, // 25: harmony.stream.sync.message.GetAccountRangeResponse.accounts:type_name -> harmony.stream.sync.message.AccountData
+ 24, // 26: harmony.stream.sync.message.StoragesData.data:type_name -> harmony.stream.sync.message.StorageData
+ 25, // 27: harmony.stream.sync.message.GetStorageRangesResponse.slots:type_name -> harmony.stream.sync.message.StoragesData
+ 20, // 28: harmony.stream.sync.message.GetReceiptsResponse.ReceiptsEntry.value:type_name -> harmony.stream.sync.message.Receipts
+ 29, // [29:29] is the sub-list for method output_type
+ 29, // [29:29] is the sub-list for method input_type
+ 29, // [29:29] is the sub-list for extension type_name
+ 29, // [29:29] is the sub-list for extension extendee
+ 0, // [0:29] is the sub-list for field type_name
}
func init() { file_msg_proto_init() }
@@ -1403,7 +2335,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Response); i {
+ switch v := v.(*GetAccountRangeRequest); i {
case 0:
return &v.state
case 1:
@@ -1415,7 +2347,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ErrorResponse); i {
+ switch v := v.(*GetStorageRangesRequest); i {
case 0:
return &v.state
case 1:
@@ -1427,7 +2359,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBlockNumberResponse); i {
+ switch v := v.(*GetByteCodesRequest); i {
case 0:
return &v.state
case 1:
@@ -1439,7 +2371,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBlockHashesResponse); i {
+ switch v := v.(*TrieNodePathSet); i {
case 0:
return &v.state
case 1:
@@ -1451,7 +2383,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBlocksByNumResponse); i {
+ switch v := v.(*GetTrieNodesRequest); i {
case 0:
return &v.state
case 1:
@@ -1463,7 +2395,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBlocksByHashesResponse); i {
+ switch v := v.(*Response); i {
case 0:
return &v.state
case 1:
@@ -1475,7 +2407,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNodeDataResponse); i {
+ switch v := v.(*ErrorResponse); i {
case 0:
return &v.state
case 1:
@@ -1487,7 +2419,7 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Receipts); i {
+ switch v := v.(*GetBlockNumberResponse); i {
case 0:
return &v.state
case 1:
@@ -1499,6 +2431,66 @@ func file_msg_proto_init() {
}
}
file_msg_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBlockHashesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBlocksByNumResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetBlocksByHashesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetNodeDataResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Receipts); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetReceiptsResponse); i {
case 0:
return &v.state
@@ -1510,6 +2502,90 @@ func file_msg_proto_init() {
return nil
}
}
+ file_msg_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccountData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetAccountRangeResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StorageData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StoragesData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetStorageRangesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetByteCodesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_msg_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GetTrieNodesResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
}
file_msg_proto_msgTypes[0].OneofWrappers = []interface{}{
(*Message_Req)(nil),
@@ -1522,8 +2598,12 @@ func file_msg_proto_init() {
(*Request_GetBlocksByHashesRequest)(nil),
(*Request_GetNodeDataRequest)(nil),
(*Request_GetReceiptsRequest)(nil),
+ (*Request_GetAccountRangeRequest)(nil),
+ (*Request_GetStorageRangesRequest)(nil),
+ (*Request_GetByteCodesRequest)(nil),
+ (*Request_GetTrieNodesRequest)(nil),
}
- file_msg_proto_msgTypes[8].OneofWrappers = []interface{}{
+ file_msg_proto_msgTypes[13].OneofWrappers = []interface{}{
(*Response_ErrorResponse)(nil),
(*Response_GetBlockNumberResponse)(nil),
(*Response_GetBlockHashesResponse)(nil),
@@ -1531,6 +2611,10 @@ func file_msg_proto_init() {
(*Response_GetBlocksByHashesResponse)(nil),
(*Response_GetNodeDataResponse)(nil),
(*Response_GetReceiptsResponse)(nil),
+ (*Response_GetAccountRangeResponse)(nil),
+ (*Response_GetStorageRangesResponse)(nil),
+ (*Response_GetByteCodesResponse)(nil),
+ (*Response_GetTrieNodesResponse)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
@@ -1538,7 +2622,7 @@ func file_msg_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_msg_proto_rawDesc,
NumEnums: 0,
- NumMessages: 18,
+ NumMessages: 30,
NumExtensions: 0,
NumServices: 0,
},
diff --git a/p2p/stream/protocols/sync/message/msg.proto b/p2p/stream/protocols/sync/message/msg.proto
index f48341868f..85fa67460d 100644
--- a/p2p/stream/protocols/sync/message/msg.proto
+++ b/p2p/stream/protocols/sync/message/msg.proto
@@ -19,6 +19,10 @@ message Request {
GetBlocksByHashesRequest get_blocks_by_hashes_request = 5;
GetNodeDataRequest get_node_data_request = 6;
GetReceiptsRequest get_receipts_request = 7;
+ GetAccountRangeRequest get_account_range_request = 8;
+ GetStorageRangesRequest get_storage_ranges_request = 9;
+ GetByteCodesRequest get_byte_codes_request = 10;
+ GetTrieNodesRequest get_trie_nodes_request = 11;
}
}
@@ -44,6 +48,36 @@ message GetReceiptsRequest {
repeated bytes block_hashes = 1;
}
+message GetAccountRangeRequest {
+ bytes root = 1;
+ bytes origin = 2;
+ bytes limit = 3;
+ uint64 bytes = 4;
+}
+
+message GetStorageRangesRequest {
+ bytes root = 1;
+ repeated bytes accounts = 2;
+ bytes origin = 3;
+ bytes limit = 4;
+ uint64 bytes = 5;
+}
+
+message GetByteCodesRequest {
+ repeated bytes hashes = 1;
+ uint64 bytes = 2;
+}
+
+message TrieNodePathSet {
+ repeated bytes pathset = 1;
+}
+
+message GetTrieNodesRequest {
+ bytes root = 1;
+ repeated TrieNodePathSet paths = 2;
+ uint64 bytes = 3;
+}
+
message Response {
uint64 req_id = 1;
oneof response {
@@ -54,6 +88,10 @@ message Response {
GetBlocksByHashesResponse get_blocks_by_hashes_response = 6;
GetNodeDataResponse get_node_data_response = 7;
GetReceiptsResponse get_receipts_response = 8;
+ GetAccountRangeResponse get_account_range_response = 9;
+ GetStorageRangesResponse get_storage_ranges_response = 10;
+ GetByteCodesResponse get_byte_codes_response = 11;
+ GetTrieNodesResponse get_trie_nodes_response = 12;
}
}
@@ -90,3 +128,35 @@ message Receipts {
message GetReceiptsResponse {
map<uint64, Receipts> receipts = 1;
}
+
+message AccountData {
+ bytes hash = 1;
+ bytes body = 2;
+}
+
+message GetAccountRangeResponse {
+ repeated AccountData accounts = 1;
+ repeated bytes proof = 2;
+}
+
+message StorageData {
+ bytes hash = 1;
+ bytes body = 2;
+}
+
+message StoragesData {
+ repeated StorageData data = 1;
+}
+
+message GetStorageRangesResponse {
+ repeated StoragesData slots = 1;
+ repeated bytes proof = 2;
+}
+
+message GetByteCodesResponse {
+ repeated bytes codes = 1;
+}
+
+message GetTrieNodesResponse {
+ repeated bytes nodes = 1;
+}
\ No newline at end of file
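
The new request messages above are range queries bounded by a byte budget. As a minimal sketch — not part of this diff — the snippet below builds one such request with the constructors that appear later in this PR's test changes (`MakeGetAccountRangeRequest`, `MakeMessageFromRequest`) and serializes it for the wire; the helper names come from the diff, while the values and the program around them are purely illustrative.

```go
// Illustrative only: build and encode a GetAccountRangeRequest using the
// helpers added in this PR (constructor names taken from stream_test.go).
// The hashes and byte budget below are placeholders, not real chain data.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	protobuf "github.com/golang/protobuf/proto"
	syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

func main() {
	root := common.HexToHash("0x01")  // state root to read from (placeholder)
	origin := common.Hash{}           // first account hash of the range
	limit := common.HexToHash("0xff") // last account hash of the range
	maxBytes := uint64(500)           // soft cap on the response size

	// Wrap the request in the top-level Message oneof, as handleReq expects.
	req := syncpb.MakeGetAccountRangeRequest(root, origin, limit, maxBytes)
	msg := syncpb.MakeMessageFromRequest(req)

	raw, err := protobuf.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded GetAccountRangeRequest: %d bytes\n", len(raw))
}
```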
diff --git a/p2p/stream/protocols/sync/message/parse.go b/p2p/stream/protocols/sync/message/parse.go
index b0bf360a8a..4c9849c06c 100644
--- a/p2p/stream/protocols/sync/message/parse.go
+++ b/p2p/stream/protocols/sync/message/parse.go
@@ -111,3 +111,67 @@ func (msg *Message) GetNodeDataResponse() (*GetNodeDataResponse, error) {
}
return gnResp, nil
}
+
+// GetAccountRangesResponse parses the message into a GetAccountRangeResponse
+func (msg *Message) GetAccountRangesResponse() (*GetAccountRangeResponse, error) {
+ resp := msg.GetResp()
+ if resp == nil {
+ return nil, errors.New("not response message")
+ }
+ if errResp := resp.GetErrorResponse(); errResp != nil {
+ return nil, &ResponseError{errResp.Error}
+ }
+ gnResp := resp.GetGetAccountRangeResponse()
+ if gnResp == nil {
+ return nil, errors.New("not GetGetAccountRangeResponse")
+ }
+ return gnResp, nil
+}
+
+// GetStorageRangesResponse parses the message into a GetStorageRangesResponse
+func (msg *Message) GetStorageRangesResponse() (*GetStorageRangesResponse, error) {
+ resp := msg.GetResp()
+ if resp == nil {
+ return nil, errors.New("not response message")
+ }
+ if errResp := resp.GetErrorResponse(); errResp != nil {
+ return nil, &ResponseError{errResp.Error}
+ }
+ gnResp := resp.GetGetStorageRangesResponse()
+ if gnResp == nil {
+ return nil, errors.New("not GetGetStorageRangesResponse")
+ }
+ return gnResp, nil
+}
+
+// GetByteCodesResponse parses the message into a GetByteCodesResponse
+func (msg *Message) GetByteCodesResponse() (*GetByteCodesResponse, error) {
+ resp := msg.GetResp()
+ if resp == nil {
+ return nil, errors.New("not response message")
+ }
+ if errResp := resp.GetErrorResponse(); errResp != nil {
+ return nil, &ResponseError{errResp.Error}
+ }
+ gnResp := resp.GetGetByteCodesResponse()
+ if gnResp == nil {
+ return nil, errors.New("not GetByteCodesResponse")
+ }
+ return gnResp, nil
+}
+
+// GetTrieNodesResponse parses the message into a GetTrieNodesResponse
+func (msg *Message) GetTrieNodesResponse() (*GetTrieNodesResponse, error) {
+ resp := msg.GetResp()
+ if resp == nil {
+ return nil, errors.New("not response message")
+ }
+ if errResp := resp.GetErrorResponse(); errResp != nil {
+ return nil, &ResponseError{errResp.Error}
+ }
+ gnResp := resp.GetGetTrieNodesResponse()
+ if gnResp == nil {
+ return nil, errors.New("not GetTrieNodesResponse")
+ }
+ return gnResp, nil
+}
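
On the client side, a hedged sketch of how a received reply could be consumed with the parse helpers added above; the struct and field names follow the protoc-gen-go output shown earlier in this diff, and the sample data is synthetic rather than taken from the PR.

```go
// Illustrative only: extract a snap-sync reply with the parse helper added
// above. The message here is built in memory; a real one would arrive
// serialized from a peer and be unmarshaled with protobuf.Unmarshal first.
package main

import (
	"fmt"

	syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
)

func main() {
	// Synthetic response wrapped in the generated oneof types, following the
	// protoc-gen-go naming used in msg.pb.go.
	msg := &syncpb.Message{
		ReqOrResp: &syncpb.Message_Resp{
			Resp: &syncpb.Response{
				ReqId: 1,
				Response: &syncpb.Response_GetAccountRangeResponse{
					GetAccountRangeResponse: &syncpb.GetAccountRangeResponse{
						Accounts: []*syncpb.AccountData{
							{Hash: []byte{0x01}, Body: []byte{0x02}},
						},
					},
				},
			},
		},
	}

	// GetAccountRangesResponse returns a typed error when the peer answered
	// with an ErrorResponse instead of account data.
	resp, err := msg.GetAccountRangesResponse()
	if err != nil {
		panic(err)
	}
	fmt.Printf("req %d: %d accounts, %d proof nodes\n",
		msg.GetResp().ReqId, len(resp.Accounts), len(resp.Proof))
}
```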
diff --git a/p2p/stream/protocols/sync/protocol.go b/p2p/stream/protocols/sync/protocol.go
index 0cb48bfff3..b4e84592ae 100644
--- a/p2p/stream/protocols/sync/protocol.go
+++ b/p2p/stream/protocols/sync/protocol.go
@@ -271,8 +271,6 @@ func (p *Protocol) RemoveStream(stID sttypes.StreamID) {
if exist && st != nil {
//TODO: log this incident with reason
st.Close()
- // stream manager removes this stream from the list and triggers discovery if number of streams are not enough
- p.sm.RemoveStream(stID) //TODO: double check to see if this part is needed
p.logger.Info().
Str("stream ID", string(stID)).
Msg("stream removed")
@@ -290,8 +288,6 @@ func (p *Protocol) StreamFailed(stID sttypes.StreamID, reason string) {
Msg("stream failed")
if st.FailedTimes() >= MaxStreamFailures {
st.Close()
- // stream manager removes this stream from the list and triggers discovery if number of streams are not enough
- p.sm.RemoveStream(stID) //TODO: double check to see if this part is needed
p.logger.Warn().
Str("stream ID", string(st.ID())).
Msg("stream removed")
diff --git a/p2p/stream/protocols/sync/stream.go b/p2p/stream/protocols/sync/stream.go
index 56419767f0..3077a8a135 100644
--- a/p2p/stream/protocols/sync/stream.go
+++ b/p2p/stream/protocols/sync/stream.go
@@ -10,6 +10,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
protobuf "github.com/golang/protobuf/proto"
+ "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
syncpb "github.com/harmony-one/harmony/p2p/stream/protocols/sync/message"
sttypes "github.com/harmony-one/harmony/p2p/stream/types"
libp2p_network "github.com/libp2p/go-libp2p/core/network"
@@ -188,6 +189,18 @@ func (st *syncStream) handleReq(req *syncpb.Request) error {
if rReq := req.GetGetReceiptsRequest(); rReq != nil {
return st.handleGetReceiptsRequest(req.ReqId, rReq)
}
+ if ndReq := req.GetGetAccountRangeRequest(); ndReq != nil {
+ return st.handleGetAccountRangeRequest(req.ReqId, ndReq)
+ }
+ if ndReq := req.GetGetStorageRangesRequest(); ndReq != nil {
+ return st.handleGetStorageRangesRequest(req.ReqId, ndReq)
+ }
+ if ndReq := req.GetGetByteCodesRequest(); ndReq != nil {
+ return st.handleGetByteCodesRequest(req.ReqId, ndReq)
+ }
+ if ndReq := req.GetGetTrieNodesRequest(); ndReq != nil {
+ return st.handleGetTrieNodesRequest(req.ReqId, ndReq)
+ }
// unsupported request type
return st.handleUnknownRequest(req.ReqId)
}
@@ -308,6 +321,95 @@ func (st *syncStream) handleGetReceiptsRequest(rid uint64, req *syncpb.GetReceip
return errors.Wrap(err, "[GetReceipts]")
}
+func (st *syncStream) handleGetAccountRangeRequest(rid uint64, req *syncpb.GetAccountRangeRequest) error {
+ serverRequestCounterVec.With(prometheus.Labels{
+ "topic": string(st.ProtoID()),
+ "request_type": "getAccountRangeRequest",
+ }).Inc()
+
+ root := common.BytesToHash(req.Root)
+ origin := common.BytesToHash(req.Origin)
+ limit := common.BytesToHash(req.Limit)
+ resp, err := st.computeGetAccountRangeRequest(rid, root, origin, limit, req.Bytes)
+ if resp == nil && err != nil {
+ resp = syncpb.MakeErrorResponseMessage(rid, err)
+ }
+ if writeErr := st.writeMsg(resp); writeErr != nil {
+ if err == nil {
+ err = writeErr
+ } else {
+ err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr)
+ }
+ }
+ return errors.Wrap(err, "[GetAccountRange]")
+}
+
+func (st *syncStream) handleGetStorageRangesRequest(rid uint64, req *syncpb.GetStorageRangesRequest) error {
+ serverRequestCounterVec.With(prometheus.Labels{
+ "topic": string(st.ProtoID()),
+ "request_type": "getStorageRangesRequest",
+ }).Inc()
+
+ root := common.BytesToHash(req.Root)
+ accounts := bytesToHashes(req.Accounts)
+ origin := common.BytesToHash(req.Origin)
+ limit := common.BytesToHash(req.Limit)
+ resp, err := st.computeGetStorageRangesRequest(rid, root, accounts, origin, limit, req.Bytes)
+ if resp == nil && err != nil {
+ resp = syncpb.MakeErrorResponseMessage(rid, err)
+ }
+ if writeErr := st.writeMsg(resp); writeErr != nil {
+ if err == nil {
+ err = writeErr
+ } else {
+ err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr)
+ }
+ }
+ return errors.Wrap(err, "[GetStorageRanges]")
+}
+
+func (st *syncStream) handleGetByteCodesRequest(rid uint64, req *syncpb.GetByteCodesRequest) error {
+ serverRequestCounterVec.With(prometheus.Labels{
+ "topic": string(st.ProtoID()),
+ "request_type": "getByteCodesRequest",
+ }).Inc()
+
+ hashes := bytesToHashes(req.Hashes)
+ resp, err := st.computeGetByteCodesRequest(rid, hashes, req.Bytes)
+ if resp == nil && err != nil {
+ resp = syncpb.MakeErrorResponseMessage(rid, err)
+ }
+ if writeErr := st.writeMsg(resp); writeErr != nil {
+ if err == nil {
+ err = writeErr
+ } else {
+ err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr)
+ }
+ }
+ return errors.Wrap(err, "[GetByteCodes]")
+}
+
+func (st *syncStream) handleGetTrieNodesRequest(rid uint64, req *syncpb.GetTrieNodesRequest) error {
+ serverRequestCounterVec.With(prometheus.Labels{
+ "topic": string(st.ProtoID()),
+ "request_type": "getTrieNodesRequest",
+ }).Inc()
+
+ root := common.BytesToHash(req.Root)
+ resp, err := st.computeGetTrieNodesRequest(rid, root, req.Paths, req.Bytes)
+ if resp == nil && err != nil {
+ resp = syncpb.MakeErrorResponseMessage(rid, err)
+ }
+ if writeErr := st.writeMsg(resp); writeErr != nil {
+ if err == nil {
+ err = writeErr
+ } else {
+ err = fmt.Errorf("%v; [writeMsg] %v", err.Error(), writeErr)
+ }
+ }
+ return errors.Wrap(err, "[GetTrieNodes]")
+}
+
func (st *syncStream) handleUnknownRequest(rid uint64) error {
serverRequestCounterVec.With(prometheus.Labels{
"topic": string(st.ProtoID()),
@@ -453,6 +555,74 @@ func (st *syncStream) computeGetReceipts(rid uint64, hs []common.Hash) (*syncpb.
return syncpb.MakeGetReceiptsResponseMessage(rid, normalizedReceipts), nil
}
+func (st *syncStream) computeGetAccountRangeRequest(rid uint64, root common.Hash, origin common.Hash, limit common.Hash, bytes uint64) (*syncpb.Message, error) {
+ if bytes == 0 {
+ return nil, fmt.Errorf("zero account ranges bytes requested")
+ }
+ if bytes > softResponseLimit {
+ return nil, fmt.Errorf("requested bytes exceed limit")
+ }
+ accounts, proof, err := st.chain.getAccountRange(root, origin, limit, bytes)
+ if err != nil {
+ return nil, err
+ }
+ return syncpb.MakeGetAccountRangeResponseMessage(rid, accounts, proof), nil
+}
+
+func (st *syncStream) computeGetStorageRangesRequest(rid uint64, root common.Hash, accounts []common.Hash, origin common.Hash, limit common.Hash, bytes uint64) (*syncpb.Message, error) {
+ if bytes == 0 {
+ return nil, fmt.Errorf("zero storage ranges bytes requested")
+ }
+ if bytes > softResponseLimit {
+ return nil, fmt.Errorf("requested bytes exceed limit")
+ }
+ if len(accounts) > GetStorageRangesRequestCap {
+		err := fmt.Errorf("GetStorageRangesRequest amount exceeds cap: %v > %v", len(accounts), GetStorageRangesRequestCap)
+ return nil, err
+ }
+ slots, proofs, err := st.chain.getStorageRanges(root, accounts, origin, limit, bytes)
+ if err != nil {
+ return nil, err
+ }
+ return syncpb.MakeGetStorageRangesResponseMessage(rid, slots, proofs), nil
+}
+
+func (st *syncStream) computeGetByteCodesRequest(rid uint64, hs []common.Hash, bytes uint64) (*syncpb.Message, error) {
+ if bytes == 0 {
+ return nil, fmt.Errorf("zero byte code bytes requested")
+ }
+ if bytes > softResponseLimit {
+ return nil, fmt.Errorf("requested bytes exceed limit")
+ }
+ if len(hs) > GetByteCodesRequestCap {
+		err := fmt.Errorf("GetByteCodesRequest amount exceeds cap: %v > %v", len(hs), GetByteCodesRequestCap)
+ return nil, err
+ }
+ codes, err := st.chain.getByteCodes(hs, bytes)
+ if err != nil {
+ return nil, err
+ }
+ return syncpb.MakeGetByteCodesResponseMessage(rid, codes), nil
+}
+
+func (st *syncStream) computeGetTrieNodesRequest(rid uint64, root common.Hash, paths []*message.TrieNodePathSet, bytes uint64) (*syncpb.Message, error) {
+ if bytes == 0 {
+ return nil, fmt.Errorf("zero trie node bytes requested")
+ }
+ if bytes > softResponseLimit {
+ return nil, fmt.Errorf("requested bytes exceed limit")
+ }
+ if len(paths) > GetTrieNodesRequestCap {
+		err := fmt.Errorf("GetTrieNodesRequest amount exceeds cap: %v > %v", len(paths), GetTrieNodesRequestCap)
+ return nil, err
+ }
+ nodes, err := st.chain.getTrieNodes(root, paths, bytes, time.Now())
+ if err != nil {
+ return nil, err
+ }
+ return syncpb.MakeGetTrieNodesResponseMessage(rid, nodes), nil
+}
+
func bytesToHashes(bs [][]byte) []common.Hash {
hs := make([]common.Hash, 0, len(bs))
for _, b := range bs {
diff --git a/p2p/stream/protocols/sync/stream_test.go b/p2p/stream/protocols/sync/stream_test.go
index f33bc3eb9f..3b538c14b8 100644
--- a/p2p/stream/protocols/sync/stream_test.go
+++ b/p2p/stream/protocols/sync/stream_test.go
@@ -60,6 +60,30 @@ var (
}
testGetNodeDataRequest = syncpb.MakeGetNodeDataRequest(testGetNodeData)
testGetNodeDataRequestMsg = syncpb.MakeMessageFromRequest(testGetNodeDataRequest)
+
+ maxBytes = uint64(500)
+ root = numberToHash(1)
+ origin = numberToHash(2)
+ limit = numberToHash(3)
+
+ testHashes = []common.Hash{
+ numberToHash(1),
+ numberToHash(2),
+ }
+
+ testAccounts = []common.Hash{account1, account2}
+
+ testGetAccountRangesRequest = syncpb.MakeGetAccountRangeRequest(root, origin, limit, maxBytes)
+ testGetAccountRangesRequestMsg = syncpb.MakeMessageFromRequest(testGetAccountRangesRequest)
+
+ testGetStorageRangesRequest = syncpb.MakeGetStorageRangesRequest(root, testAccounts, origin, limit, maxBytes)
+ testGetStorageRangesRequestMsg = syncpb.MakeMessageFromRequest(testGetStorageRangesRequest)
+
+ testGetByteCodesRequest = syncpb.MakeGetByteCodesRequest(testHashes, maxBytes)
+ testGetByteCodesRequestMsg = syncpb.MakeMessageFromRequest(testGetByteCodesRequest)
+
+ testGetTrieNodesRequest = syncpb.MakeGetTrieNodesRequest(root, testPaths, maxBytes)
+ testGetTrieNodesRequestMsg = syncpb.MakeMessageFromRequest(testGetTrieNodesRequest)
)
func TestSyncStream_HandleGetBlocksByRequest(t *testing.T) {
@@ -188,6 +212,90 @@ func TestSyncStream_HandleGetNodeData(t *testing.T) {
}
}
+func TestSyncStream_HandleGetAccountRanges(t *testing.T) {
+ st, remoteSt := makeTestSyncStream()
+
+ go st.run()
+ defer close(st.closeC)
+
+ req := testGetAccountRangesRequestMsg
+ b, _ := protobuf.Marshal(req)
+ err := remoteSt.WriteBytes(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(200 * time.Millisecond)
+ receivedBytes, _ := remoteSt.ReadBytes()
+
+ if err := checkAccountRangeResult(maxBytes, receivedBytes); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSyncStream_HandleGetStorageRanges(t *testing.T) {
+ st, remoteSt := makeTestSyncStream()
+
+ go st.run()
+ defer close(st.closeC)
+
+ req := testGetStorageRangesRequestMsg
+ b, _ := protobuf.Marshal(req)
+ err := remoteSt.WriteBytes(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(200 * time.Millisecond)
+ receivedBytes, _ := remoteSt.ReadBytes()
+
+ if err := checkStorageRangesResult(testAccounts, maxBytes, receivedBytes); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSyncStream_HandleGetByteCodesResult(t *testing.T) {
+ st, remoteSt := makeTestSyncStream()
+
+ go st.run()
+ defer close(st.closeC)
+
+ req := testGetByteCodesRequestMsg
+ b, _ := protobuf.Marshal(req)
+ err := remoteSt.WriteBytes(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(200 * time.Millisecond)
+ receivedBytes, _ := remoteSt.ReadBytes()
+
+ if err := checkByteCodesResult(testHashes, maxBytes, receivedBytes); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSyncStream_HandleGetTrieNodes(t *testing.T) {
+ st, remoteSt := makeTestSyncStream()
+
+ go st.run()
+ defer close(st.closeC)
+
+ req := testGetTrieNodesRequestMsg
+ b, _ := protobuf.Marshal(req)
+ err := remoteSt.WriteBytes(b)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ time.Sleep(200 * time.Millisecond)
+ receivedBytes, _ := remoteSt.ReadBytes()
+
+ if err := checkTrieNodesResult(testHashes, maxBytes, receivedBytes); err != nil {
+ t.Fatal(err)
+ }
+}
+
func makeTestSyncStream() (*syncStream, *testRemoteBaseStream) {
localRaw, remoteRaw := makePairP2PStreams()
remote := newTestRemoteBaseStream(remoteRaw)
diff --git a/rosetta/infra/README.md b/rosetta/infra/README.md
index 2c7d620c49..c04f998502 100644
--- a/rosetta/infra/README.md
+++ b/rosetta/infra/README.md
@@ -108,7 +108,7 @@ Note that the directory structure for `/root/data` (== `./data`) should look som
```
### Inspecting Logs
-If you mount `./data` on the host to `/root/data` in the container, you van view the harmony node logs at
+If you mount `./data` on the host to `/root/data` in the container, you can view the harmony node logs at
`./data/logs/` on your host machine.
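+
+For example (a minimal sketch; the exact log file names under `./data/logs/` depend on your node configuration):
+```bash
+# follow all harmony node logs written to the mounted data directory on the host
+tail -f ./data/logs/*.log
+```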
### View rosetta request logs
diff --git a/rpc/eth/types.go b/rpc/eth/types.go
index c51d604b2d..a319a8fc12 100644
--- a/rpc/eth/types.go
+++ b/rpc/eth/types.go
@@ -100,6 +100,37 @@ func NewTransaction(
}
return result, nil
}
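+
+// NewTransactionFromTransaction returns the RPC transaction data for the given
+// transaction; block hash, block number, and transaction index are only set
+// when the containing block hash is known (non-empty).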
+func NewTransactionFromTransaction(
+ tx *types.Transaction, blockHash common.Hash,
+ blockNumber uint64, timestamp uint64, index uint64,
+) (*Transaction, error) {
+ from, err := tx.SenderAddress()
+ if err != nil {
+ return nil, fmt.Errorf("unable to get sender address: %w", err)
+ }
+ v, r, s := tx.RawSignatureValues()
+
+ result := &Transaction{
+ From: from,
+ Gas: hexutil.Uint64(tx.GasLimit()),
+ GasPrice: (*hexutil.Big)(tx.GasPrice()),
+ Hash: tx.Hash(),
+ Input: hexutil.Bytes(tx.Data()),
+ Nonce: hexutil.Uint64(tx.Nonce()),
+ To: tx.To(),
+ Value: (*hexutil.Big)(tx.Value()),
+ Timestamp: hexutil.Uint64(timestamp),
+ V: (*hexutil.Big)(v),
+ R: (*hexutil.Big)(r),
+ S: (*hexutil.Big)(s),
+ }
+ if blockHash != (common.Hash{}) {
+ result.BlockHash = &blockHash
+ result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber))
+ result.TransactionIndex = (*hexutil.Uint64)(&index)
+ }
+ return result, nil
+}
// NewReceipt returns the RPC data for a new receipt
func NewReceipt(senderAddr common.Address, tx *types.EthTransaction, blockHash common.Hash, blockNumber, blockIndex uint64, receipt *types.Receipt) (map[string]interface{}, error) {
diff --git a/rpc/private_debug.go b/rpc/private_debug.go
index 921d6645d5..97ade82dd2 100644
--- a/rpc/private_debug.go
+++ b/rpc/private_debug.go
@@ -65,3 +65,10 @@ func (s *PrivateDebugService) GetLastSigningPower(
) (float64, error) {
return s.hmy.NodeAPI.GetLastSigningPower()
}
+
+// GetLastSigningPower2 returns the last signing power
+func (s *PrivateDebugService) GetLastSigningPower2(
+ ctx context.Context,
+) (float64, error) {
+ return s.hmy.NodeAPI.GetLastSigningPower2()
+}
diff --git a/scripts/package/readme.md b/scripts/package/readme.md
index 9ea4af4090..510bc387f3 100644
--- a/scripts/package/readme.md
+++ b/scripts/package/readme.md
@@ -19,7 +19,7 @@ The default blockchain DBs are stored in `/home/harmony/harmony_db_?` directory.
-The configuration of harmony process is in `/etc/harmony/harmony.conf`.
+The configuration of the harmony process is in `/etc/harmony/harmony.conf`.
# Package Manager
-Please take sometime to learn about the package managers used on Fedora/Debian based distributions.
+Please take some time to learn about the package managers used on Fedora/Debian based distributions.
-There are many other package managers can be used to manage rpm/deb packages like [Apt],
-or [Yum]
+There are many other package managers that can be used to manage rpm/deb packages, such as [Apt]
+or [Yum].
@@ -128,7 +128,7 @@ The default configuration is for validators on mainnet. No need to run `harmony-
* `systemctl status harmony` to check status of node
# Change node configuration
-The node configuration file is in `/etc/harmony/harmony.conf`. Please edit the file as you needed.
+The node configuration file is in `/etc/harmony/harmony.conf`. Please edit the file as you need.
```bash
sudo vim /etc/harmony/harmony.conf
```
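+
+After editing, you typically need to restart the service (named `harmony`, as in the `systemctl status harmony` command above) for the change to take effect:
+```bash
+sudo systemctl restart harmony
+```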
diff --git a/scripts/travis_rosetta_checker.sh b/scripts/travis_rosetta_checker.sh
index b2e395fdba..d2f98569f9 100644
--- a/scripts/travis_rosetta_checker.sh
+++ b/scripts/travis_rosetta_checker.sh
@@ -1,12 +1,14 @@
#!/usr/bin/env bash
set -e
+echo $TRAVIS_PULL_REQUEST_BRANCH
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
echo $DIR
echo $GOPATH
cd $GOPATH/src/github.com/harmony-one/harmony-test
git fetch
git pull
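+# check out the matching harmony-test branch when it exists; otherwise stay on the current branch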
+git checkout $TRAVIS_PULL_REQUEST_BRANCH || true
git branch --show-current
cd localnet
docker build -t harmonyone/localnet-test .
diff --git a/scripts/travis_rpc_checker.sh b/scripts/travis_rpc_checker.sh
index b057452f88..5de2ef93b8 100755
--- a/scripts/travis_rpc_checker.sh
+++ b/scripts/travis_rpc_checker.sh
@@ -1,11 +1,13 @@
#!/usr/bin/env bash
set -e
+echo $TRAVIS_PULL_REQUEST_BRANCH
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
echo $DIR
echo $GOPATH
cd $GOPATH/src/github.com/harmony-one/harmony-test
git fetch
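+# check out the matching harmony-test branch when it exists; otherwise stay on the current branch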
+git checkout $TRAVIS_PULL_REQUEST_BRANCH || true
git pull
git branch --show-current
cd localnet
diff --git a/shard/committee/assignment.go b/shard/committee/assignment.go
index 85162e6712..4978b61559 100644
--- a/shard/committee/assignment.go
+++ b/shard/committee/assignment.go
@@ -25,19 +25,6 @@ import (
"github.com/pkg/errors"
)
-// ValidatorListProvider ..
-type ValidatorListProvider interface {
- Compute(
- epoch *big.Int, reader DataProvider,
- ) (*shard.State, error)
- ReadFromDB(epoch *big.Int, reader DataProvider) (*shard.State, error)
-}
-
-// Reader is committee.Reader and it is the API that committee membership assignment needs
-type Reader interface {
- ValidatorListProvider
-}
-
// StakingCandidatesReader ..
type StakingCandidatesReader interface {
CurrentBlock() *types.Block
@@ -272,7 +259,7 @@ type partialStakingEnabled struct{}
var (
// WithStakingEnabled ..
- WithStakingEnabled Reader = partialStakingEnabled{}
+ WithStakingEnabled = partialStakingEnabled{}
// ErrComputeForEpochInPast ..
ErrComputeForEpochInPast = errors.New("cannot compute for epoch in past")
)
diff --git a/test/chain/chain/chain_makers.go b/test/chain/chain/chain_makers.go
index 2b82beb575..122540038b 100644
--- a/test/chain/chain/chain_makers.go
+++ b/test/chain/chain/chain_makers.go
@@ -22,12 +22,14 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/trie"
"github.com/harmony-one/harmony/core"
"github.com/harmony-one/harmony/block"
blockfactory "github.com/harmony-one/harmony/block/factory"
consensus_engine "github.com/harmony-one/harmony/consensus/engine"
"github.com/harmony-one/harmony/core/state"
+ "github.com/harmony-one/harmony/core/state/snapshot"
"github.com/harmony-one/harmony/core/types"
"github.com/harmony-one/harmony/core/vm"
"github.com/harmony-one/harmony/internal/params"
@@ -252,6 +254,7 @@ func (cr *fakeChainReader) GetReceiptsByHash(hash common.Hash) types.Receipts
func (cr *fakeChainReader) ContractCode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
func (cr *fakeChainReader) ValidatorCode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
func (cr *fakeChainReader) ReadShardState(epoch *big.Int) (*shard.State, error) { return nil, nil }
+func (cr *fakeChainReader) TrieDB() *trie.Database { return nil }
func (cr *fakeChainReader) TrieNode(hash common.Hash) ([]byte, error) { return []byte{}, nil }
func (cr *fakeChainReader) ReadValidatorList() ([]common.Address, error) { return nil, nil }
func (cr *fakeChainReader) ValidatorCandidates() []common.Address { return nil }
@@ -273,6 +276,9 @@ func (cr *fakeChainReader) ReadValidatorInformationAtState(
func (cr *fakeChainReader) StateAt(root common.Hash) (*state.DB, error) {
return nil, nil
}
+func (cr *fakeChainReader) Snapshots() *snapshot.Tree {
+ return nil
+}
func (cr *fakeChainReader) ReadValidatorSnapshot(
addr common.Address,
) (*staking.ValidatorSnapshot, error) {