From ec5be33b16e4a27b191ddd624188806c716173b3 Mon Sep 17 00:00:00 2001 From: ssd04 Date: Fri, 10 Jan 2025 01:04:56 +0200 Subject: [PATCH] add custom log for process block --- process/block/argProcessor.go | 2 + process/block/baseProcess.go | 123 +++++++++---------- process/block/displayBlock.go | 18 ++- process/block/displayMetaBlock.go | 14 ++- process/block/headerValidator.go | 18 ++- process/block/metablock.go | 181 +++++++++++++++------------- process/block/shardblock.go | 192 ++++++++++++++++-------------- 7 files changed, 296 insertions(+), 252 deletions(-) diff --git a/process/block/argProcessor.go b/process/block/argProcessor.go index df929214829..397981573ab 100644 --- a/process/block/argProcessor.go +++ b/process/block/argProcessor.go @@ -18,6 +18,7 @@ import ( "github.com/multiversx/mx-chain-go/sharding" "github.com/multiversx/mx-chain-go/sharding/nodesCoordinator" "github.com/multiversx/mx-chain-go/state" + logger "github.com/multiversx/mx-chain-logger-go" ) type coreComponentsHolder interface { @@ -94,6 +95,7 @@ type ArgBaseProcessor struct { BlockProcessingCutoffHandler cutoff.BlockProcessingCutoffHandler ManagedPeersHolder common.ManagedPeersHolder SentSignaturesTracker process.SentSignaturesTracker + Logger logger.Logger } // ArgShardProcessor holds all dependencies required by the process data factory in order to create diff --git a/process/block/baseProcess.go b/process/block/baseProcess.go index 4f2a3661ece..a4ea9d08d14 100644 --- a/process/block/baseProcess.go +++ b/process/block/baseProcess.go @@ -64,6 +64,7 @@ type hdrInfo struct { } type baseProcessor struct { + log logger.Logger shardCoordinator sharding.Coordinator nodesCoordinator nodesCoordinator.NodesCoordinator accountsDB map[state.AccountsDbIdentifier]state.AccountsAdapter @@ -171,14 +172,14 @@ func (bp *baseProcessor) checkBlockValidity( return nil } - log.Debug("hash does not match", + bp.log.Debug("hash does not match", "local block hash", bp.blockChain.GetGenesisHeaderHash(), 
"received previous hash", headerHandler.GetPrevHash()) return process.ErrBlockHashDoesNotMatch } - log.Debug("nonce does not match", + bp.log.Debug("nonce does not match", "local block nonce", 0, "received nonce", headerHandler.GetNonce()) @@ -186,7 +187,7 @@ func (bp *baseProcessor) checkBlockValidity( } if headerHandler.GetRound() <= currentBlockHeader.GetRound() { - log.Debug("round does not match", + bp.log.Debug("round does not match", "local block round", currentBlockHeader.GetRound(), "received block round", headerHandler.GetRound()) @@ -194,7 +195,7 @@ func (bp *baseProcessor) checkBlockValidity( } if headerHandler.GetNonce() != currentBlockHeader.GetNonce()+1 { - log.Debug("nonce does not match", + bp.log.Debug("nonce does not match", "local block nonce", currentBlockHeader.GetNonce(), "received nonce", headerHandler.GetNonce()) @@ -202,7 +203,7 @@ func (bp *baseProcessor) checkBlockValidity( } if !bytes.Equal(headerHandler.GetPrevHash(), bp.blockChain.GetCurrentBlockHeaderHash()) { - log.Debug("hash does not match", + bp.log.Debug("hash does not match", "local block hash", bp.blockChain.GetCurrentBlockHeaderHash(), "received previous hash", headerHandler.GetPrevHash()) @@ -210,7 +211,7 @@ func (bp *baseProcessor) checkBlockValidity( } if !bytes.Equal(headerHandler.GetPrevRandSeed(), currentBlockHeader.GetRandSeed()) { - log.Debug("random seed does not match", + bp.log.Debug("random seed does not match", "local random seed", currentBlockHeader.GetRandSeed(), "received previous random seed", headerHandler.GetPrevRandSeed()) @@ -241,7 +242,7 @@ func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler } if !bytes.Equal(additionalData.GetScheduledRootHash(), bp.getRootHash()) { - log.Debug("scheduled root hash does not match", + bp.log.Debug("scheduled root hash does not match", "current root hash", bp.getRootHash(), "header scheduled root hash", additionalData.GetScheduledRootHash()) return process.ErrScheduledRootHashDoesNotMatch @@ 
-255,7 +256,7 @@ func (bp *baseProcessor) checkScheduledRootHash(headerHandler data.HeaderHandler func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { trieRootHash, err := bp.accountsDB[state.UserAccountsState].RootHash() if err != nil { - log.Debug("verify account.RootHash", "error", err.Error()) + bp.log.Debug("verify account.RootHash", "error", err.Error()) } return bytes.Equal(trieRootHash, rootHash) @@ -265,7 +266,7 @@ func (bp *baseProcessor) verifyStateRoot(rootHash []byte) bool { func (bp *baseProcessor) getRootHash() []byte { rootHash, err := bp.accountsDB[state.UserAccountsState].RootHash() if err != nil { - log.Trace("get account.RootHash", "error", err.Error()) + bp.log.Trace("get account.RootHash", "error", err.Error()) } return rootHash @@ -906,7 +907,7 @@ func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.Hea scheduledMiniBlocks := bp.scheduledTxsExecutionHandler.GetScheduledMiniBlocks() if len(scheduledMiniBlocks) > len(headerHandler.GetMiniBlockHeadersHashes()) { - log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "num mbs scheduled", len(scheduledMiniBlocks), "num mbs received", len(headerHandler.GetMiniBlockHeadersHashes())) + bp.log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "num mbs scheduled", len(scheduledMiniBlocks), "num mbs received", len(headerHandler.GetMiniBlockHeadersHashes())) return process.ErrScheduledMiniBlocksMismatch } @@ -917,7 +918,7 @@ func (bp *baseProcessor) checkScheduledMiniBlocksValidity(headerHandler data.Hea } if !bytes.Equal(scheduledMiniBlockHash, headerHandler.GetMiniBlockHeadersHashes()[index]) { - log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "index", index, "scheduled mb hash", scheduledMiniBlockHash, "received mb hash", headerHandler.GetMiniBlockHeadersHashes()[index]) + bp.log.Debug("baseProcessor.checkScheduledMiniBlocksValidity", "index", index, "scheduled mb hash", scheduledMiniBlockHash, "received mb hash", 
headerHandler.GetMiniBlockHeadersHashes()[index]) return process.ErrScheduledMiniBlocksMismatch } } @@ -961,7 +962,7 @@ func (bp *baseProcessor) requestMissingFinalityAttestingHeaders( } if requestedHeaders > 0 { - log.Debug("requested missing finality attesting headers", + bp.log.Debug("requested missing finality attesting headers", "num headers", requestedHeaders, "shard", shardID) } @@ -995,7 +996,7 @@ func (bp *baseProcessor) cleanupPools(headerHandler data.HeaderHandler) { if bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, headerHandler.GetEpoch()) { err := bp.dataPool.Proofs().CleanupProofsBehindNonce(bp.shardCoordinator.SelfId(), highestPrevFinalBlockNonce) if err != nil { - log.Warn("failed to cleanup notarized proofs behind nonce", + bp.log.Warn("failed to cleanup notarized proofs behind nonce", "nonce", noncesToPrevFinal, "shardID", bp.shardCoordinator.SelfId(), "error", err) @@ -1018,7 +1019,7 @@ func (bp *baseProcessor) cleanupPoolsForCrossShard( ) { crossNotarizedHeader, _, err := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if err != nil { - displayCleanupErrorMessage("cleanupPoolsForCrossShard", + bp.displayCleanupErrorMessage("cleanupPoolsForCrossShard", shardID, noncesToPrevFinal, err) @@ -1034,7 +1035,7 @@ func (bp *baseProcessor) cleanupPoolsForCrossShard( if bp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, crossNotarizedHeader.GetEpoch()) { err = bp.dataPool.Proofs().CleanupProofsBehindNonce(shardID, noncesToPrevFinal) if err != nil { - log.Warn("failed to cleanup notarized proofs behind nonce", + bp.log.Warn("failed to cleanup notarized proofs behind nonce", "nonce", noncesToPrevFinal, "shardID", shardID, "error", err) @@ -1076,7 +1077,7 @@ func (bp *baseProcessor) removeBlocksBody(nonce uint64, shardId uint32) { for _, header := range headers { errNotCritical := bp.removeBlockBodyOfHeader(header) if errNotCritical != nil { - log.Debug("RemoveBlockDataFromPool", 
"error", errNotCritical.Error()) + bp.log.Debug("RemoveBlockDataFromPool", "error", errNotCritical.Error()) } } } @@ -1126,14 +1127,14 @@ func (bp *baseProcessor) getFinalMiniBlocks(header data.HeaderHandler, body *blo var miniBlocks block.MiniBlockSlice if len(body.MiniBlocks) != len(header.GetMiniBlockHeaderHandlers()) { - log.Warn("baseProcessor.getFinalMiniBlocks: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(header.GetMiniBlockHeaderHandlers())) + bp.log.Warn("baseProcessor.getFinalMiniBlocks: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(header.GetMiniBlockHeaderHandlers())) return nil, process.ErrNumOfMiniBlocksAndMiniBlocksHeadersMismatch } for index, miniBlock := range body.MiniBlocks { miniBlockHeader := header.GetMiniBlockHeaderHandlers()[index] if !miniBlockHeader.IsFinal() { - log.Debug("shardProcessor.getFinalMiniBlocks: do not remove from pool / broadcast mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + bp.log.Debug("shardProcessor.getFinalMiniBlocks: do not remove from pool / broadcast mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } @@ -1158,7 +1159,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPools(noncesToPrevFinal uint64) { func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, noncesToPrevFinal uint64) { selfNotarizedHeader, _, errSelfNotarized := bp.blockTracker.GetSelfNotarizedHeader(shardID, noncesToPrevFinal) if errSelfNotarized != nil { - displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", + bp.displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetSelfNotarizedHeader", shardID, noncesToPrevFinal, errSelfNotarized) @@ -1171,7 +1172,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, nonces if shardID != bp.shardCoordinator.SelfId() { crossNotarizedHeader, _, 
errCrossNotarized := bp.blockTracker.GetCrossNotarizedHeader(shardID, noncesToPrevFinal) if errCrossNotarized != nil { - displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", + bp.displayCleanupErrorMessage("cleanupBlockTrackerPoolsForShard.GetCrossNotarizedHeader", shardID, noncesToPrevFinal, errCrossNotarized) @@ -1187,7 +1188,7 @@ func (bp *baseProcessor) cleanupBlockTrackerPoolsForShard(shardID uint32, nonces crossNotarizedNonce, ) - log.Trace("cleanupBlockTrackerPoolsForShard.CleanupHeadersBehindNonce", + bp.log.Trace("cleanupBlockTrackerPoolsForShard.CleanupHeadersBehindNonce", "shard", shardID, "self notarized nonce", selfNotarizedNonce, "cross notarized nonce", crossNotarizedNonce, @@ -1219,7 +1220,7 @@ func (bp *baseProcessor) prepareDataForBootStorer(args bootStorerDataArgs) { elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveDataForBootStorer", "elapsed time", elapsedTime) + bp.log.Warn("saveDataForBootStorer", "elapsed time", elapsedTime) } } @@ -1248,7 +1249,7 @@ func (bp *baseProcessor) getLastCrossNotarizedHeaders() []bootstrapStorage.Boots func (bp *baseProcessor) getLastCrossNotarizedHeadersForShard(shardID uint32) *bootstrapStorage.BootstrapHeaderInfo { lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash, err := bp.blockTracker.GetLastCrossNotarizedHeader(shardID) if err != nil { - log.Warn("getLastCrossNotarizedHeadersForShard", + bp.log.Warn("getLastCrossNotarizedHeadersForShard", "shard", shardID, "error", err.Error()) return nil @@ -1292,7 +1293,7 @@ func (bp *baseProcessor) getLastSelfNotarizedHeaders() []bootstrapStorage.Bootst func (bp *baseProcessor) getLastSelfNotarizedHeadersForShard(shardID uint32) *bootstrapStorage.BootstrapHeaderInfo { lastSelfNotarizedHeader, lastSelfNotarizedHeaderHash, err := bp.blockTracker.GetLastSelfNotarizedHeader(shardID) if err != nil { - log.Warn("getLastSelfNotarizedHeadersForShard", + 
bp.log.Warn("getLastSelfNotarizedHeadersForShard", "shard", shardID, "error", err.Error()) return nil @@ -1350,7 +1351,7 @@ func (bp *baseProcessor) DecodeBlockBody(dta []byte) data.BodyHandler { err := bp.marshalizer.Unmarshal(body, dta) if err != nil { - log.Debug("DecodeBlockBody.Unmarshal", "error", err.Error()) + bp.log.Debug("DecodeBlockBody.Unmarshal", "error", err.Error()) return nil } @@ -1361,14 +1362,14 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h startTime := time.Now() bp.txCoordinator.SaveTxsToStorage(body) - log.Trace("saveBody.SaveTxsToStorage", "time", time.Since(startTime)) + bp.log.Trace("saveBody.SaveTxsToStorage", "time", time.Since(startTime)) var errNotCritical error var marshalizedMiniBlock []byte for i := 0; i < len(body.MiniBlocks); i++ { marshalizedMiniBlock, errNotCritical = bp.marshalizer.Marshal(body.MiniBlocks[i]) if errNotCritical != nil { - log.Warn("saveBody.Marshal", "error", errNotCritical.Error()) + bp.log.Warn("saveBody.Marshal", "error", errNotCritical.Error()) continue } @@ -1379,7 +1380,7 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h "saveBody.Put -> MiniBlockUnit", "err", errNotCritical) } - log.Trace("saveBody.Put -> MiniBlockUnit", "time", time.Since(startTime), "hash", miniBlockHash) + bp.log.Trace("saveBody.Put -> MiniBlockUnit", "time", time.Since(startTime), "hash", miniBlockHash) } receiptsHolder := holders.NewReceiptsHolder(bp.txCoordinator.GetCreatedInShardMiniBlocks()) @@ -1394,7 +1395,7 @@ func (bp *baseProcessor) saveBody(body *block.Body, header data.HeaderHandler, h elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveBody", "elapsed time", elapsedTime) + bp.log.Warn("saveBody", "elapsed time", elapsedTime) } } @@ -1420,7 +1421,7 @@ func (bp *baseProcessor) saveShardHeader(header data.HeaderHandler, headerHash [ elapsedTime := time.Since(startTime) if elapsedTime >= 
common.PutInStorerMaxTime { - log.Warn("saveShardHeader", "elapsed time", elapsedTime) + bp.log.Warn("saveShardHeader", "elapsed time", elapsedTime) } } @@ -1445,7 +1446,7 @@ func (bp *baseProcessor) saveMetaHeader(header data.HeaderHandler, headerHash [] elapsedTime := time.Since(startTime) if elapsedTime >= common.PutInStorerMaxTime { - log.Warn("saveMetaHeader", "elapsed time", elapsedTime) + bp.log.Warn("saveMetaHeader", "elapsed time", elapsedTime) } } @@ -1461,7 +1462,7 @@ func getLastSelfNotarizedHeaderByItself(chainHandler data.ChainHandler) (data.He } func (bp *baseProcessor) setFinalizedHeaderHashInIndexer(hdrHash []byte) { - log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) + bp.log.Debug("baseProcessor.setFinalizedHeaderHashInIndexer", "finalized header hash", hdrHash) bp.outportHandler.FinalizedBlock(&outportcore.FinalizedBlock{ShardID: bp.shardCoordinator.SelfId(), HeaderHash: hdrHash}) } @@ -1494,7 +1495,7 @@ func (bp *baseProcessor) revertAccountState() { for key := range bp.accountsDB { err := bp.accountsDB[key].RevertToSnapshot(0) if err != nil { - log.Debug("RevertToSnapshot", "error", err.Error()) + bp.log.Debug("RevertToSnapshot", "error", err.Error()) } } } @@ -1503,7 +1504,7 @@ func (bp *baseProcessor) revertScheduledInfo() { header, headerHash := bp.getLastCommittedHeaderAndHash() err := bp.scheduledTxsExecutionHandler.RollBackToBlock(headerHash) if err != nil { - log.Trace("baseProcessor.revertScheduledInfo", "error", err.Error()) + bp.log.Trace("baseProcessor.revertScheduledInfo", "error", err.Error()) scheduledInfo := &process.ScheduledInfo{ RootHash: header.GetRootHash(), IntermediateTxs: make(map[block.Type][]data.TransactionHandler), @@ -1540,7 +1541,7 @@ func (bp *baseProcessor) RevertAccountsDBToSnapshot(accountsSnapshot map[state.A for key := range bp.accountsDB { err := bp.accountsDB[key].RevertToSnapshot(accountsSnapshot[key]) if err != nil { - log.Debug("RevertAccountsDBToSnapshot", 
"error", err.Error()) + bp.log.Debug("RevertAccountsDBToSnapshot", "error", err.Error()) } } } @@ -1623,7 +1624,7 @@ func (bp *baseProcessor) PruneStateOnRollback(currHeader data.HeaderHandler, cur func (bp *baseProcessor) getPruningHandler(finalHeaderNonce uint64) state.PruningHandler { if finalHeaderNonce-bp.lastRestartNonce <= uint64(bp.pruningDelay) { - log.Debug("will skip pruning", + bp.log.Debug("will skip pruning", "finalHeaderNonce", finalHeaderNonce, "last restart nonce", bp.lastRestartNonce, "num blocks for pruning delay", bp.pruningDelay, @@ -1659,17 +1660,17 @@ func (bp *baseProcessor) displayMiniBlocksPool() { for _, hash := range miniBlocksPool.Keys() { value, ok := miniBlocksPool.Get(hash) if !ok { - log.Debug("displayMiniBlocksPool: mini block not found", "hash", logger.DisplayByteSlice(hash)) + bp.log.Debug("displayMiniBlocksPool: mini block not found", "hash", logger.DisplayByteSlice(hash)) continue } miniBlock, ok := value.(*block.MiniBlock) if !ok { - log.Debug("displayMiniBlocksPool: wrong type assertion", "hash", logger.DisplayByteSlice(hash)) + bp.log.Debug("displayMiniBlocksPool: wrong type assertion", "hash", logger.DisplayByteSlice(hash)) continue } - log.Trace("mini block in pool", + bp.log.Trace("mini block in pool", "hash", logger.DisplayByteSlice(hash), "type", miniBlock.Type, "sender", miniBlock.SenderShardID, @@ -1690,19 +1691,19 @@ func trimSliceBootstrapHeaderInfo(in []bootstrapStorage.BootstrapHeaderInfo) []b func (bp *baseProcessor) restoreBlockBody(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) { if check.IfNil(bodyHandler) { - log.Debug("restoreMiniblocks nil bodyHandler") + bp.log.Debug("restoreMiniblocks nil bodyHandler") return } body, ok := bodyHandler.(*block.Body) if !ok { - log.Debug("restoreMiniblocks wrong type assertion for bodyHandler") + bp.log.Debug("restoreMiniblocks wrong type assertion for bodyHandler") return } _, errNotCritical := bp.txCoordinator.RestoreBlockDataFromStorage(body) if 
errNotCritical != nil { - log.Debug("restoreBlockBody RestoreBlockDataFromStorage", "error", errNotCritical.Error()) + bp.log.Debug("restoreBlockBody RestoreBlockDataFromStorage", "error", errNotCritical.Error()) } go bp.txCounter.headerReverted(headerHandler) @@ -1730,7 +1731,7 @@ func (bp *baseProcessor) RestoreBlockBodyIntoPools(bodyHandler data.BodyHandler) func (bp *baseProcessor) requestMiniBlocksIfNeeded(headerHandler data.HeaderHandler) { lastCrossNotarizedHeader, _, err := bp.blockTracker.GetLastCrossNotarizedHeader(headerHandler.GetShardID()) if err != nil { - log.Debug("requestMiniBlocksIfNeeded.GetLastCrossNotarizedHeader", + bp.log.Debug("requestMiniBlocksIfNeeded.GetLastCrossNotarizedHeader", "shard", headerHandler.GetShardID(), "error", err.Error()) return @@ -1765,7 +1766,7 @@ func (bp *baseProcessor) recordBlockInHistory(blockHeaderHash []byte, blockHeade if core.IsClosingError(err) { logLevel = logger.LogDebug } - log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) + bp.log.Log(logLevel, "historyRepo.RecordBlock()", "blockHeaderHash", blockHeaderHash, "error", err.Error()) } } @@ -1773,7 +1774,7 @@ func (bp *baseProcessor) addHeaderIntoTrackerPool(nonce uint64, shardID uint32) headersPool := bp.dataPool.Headers() headers, hashes, err := headersPool.GetHeadersByNonceAndShardId(nonce, shardID) if err != nil { - log.Trace("baseProcessor.addHeaderIntoTrackerPool", "error", err.Error()) + bp.log.Trace("baseProcessor.addHeaderIntoTrackerPool", "error", err.Error()) return } @@ -1841,7 +1842,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl if errUnmarshal != nil { numCodeLeaves++ totalSizeCodeLeaves += len(leaf.Value()) - log.Trace("cannot unmarshal user account. it may be a code leaf", "error", errUnmarshal) + bp.log.Trace("cannot unmarshal user account. 
it may be a code leaf", "error", errUnmarshal) continue } @@ -1902,7 +1903,7 @@ func (bp *baseProcessor) commitTrieEpochRootHashIfNeeded(metaBlock *block.MetaBl "from which totalSizeAccountsDataTries", totalSizeAccountsDataTries}...) } - log.Debug("sum of addresses in shard at epoch start", stats...) + bp.log.Debug("sum of addresses in shard at epoch start", stats...) return nil } @@ -1958,7 +1959,7 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, bp.processStatusHandler.SetIdle() }() - scheduledMiniBlocksFromMe, err := getScheduledMiniBlocksFromMe(headerHandler, bodyHandler) + scheduledMiniBlocksFromMe, err := bp.getScheduledMiniBlocksFromMe(headerHandler, bodyHandler) if err != nil { return err } @@ -1970,7 +1971,7 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, startTime := time.Now() err = bp.scheduledTxsExecutionHandler.ExecuteAll(haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to execute all scheduled transactions", + bp.log.Debug("elapsed time to execute all scheduled transactions", "time [s]", elapsedTime, ) if err != nil { @@ -1986,21 +1987,21 @@ func (bp *baseProcessor) ProcessScheduledBlock(headerHandler data.HeaderHandler, finalProcessingGasAndFees := bp.getGasAndFeesWithScheduled() - scheduledProcessingGasAndFees := gasAndFeesDelta(normalProcessingGasAndFees, finalProcessingGasAndFees) + scheduledProcessingGasAndFees := bp.gasAndFeesDelta(normalProcessingGasAndFees, finalProcessingGasAndFees) bp.scheduledTxsExecutionHandler.SetScheduledRootHash(rootHash) bp.scheduledTxsExecutionHandler.SetScheduledGasAndFees(scheduledProcessingGasAndFees) return nil } -func getScheduledMiniBlocksFromMe(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) (block.MiniBlockSlice, error) { +func (bp *baseProcessor) getScheduledMiniBlocksFromMe(headerHandler data.HeaderHandler, bodyHandler data.BodyHandler) (block.MiniBlockSlice, error) { body, ok := 
bodyHandler.(*block.Body) if !ok { return nil, process.ErrWrongTypeAssertion } if len(body.MiniBlocks) != len(headerHandler.GetMiniBlockHeaderHandlers()) { - log.Warn("getScheduledMiniBlocksFromMe: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(headerHandler.GetMiniBlockHeaderHandlers())) + bp.log.Warn("getScheduledMiniBlocksFromMe: num of mini blocks and mini blocks headers does not match", "num of mb", len(body.MiniBlocks), "num of mbh", len(headerHandler.GetMiniBlockHeaderHandlers())) return nil, process.ErrNumOfMiniBlocksAndMiniBlocksHeadersMismatch } @@ -2033,13 +2034,13 @@ func (bp *baseProcessor) getGasAndFeesWithScheduled() scheduled.GasAndFees { return gasAndFees } -func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) scheduled.GasAndFees { +func (bp *baseProcessor) gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) scheduled.GasAndFees { zero := big.NewInt(0) result := process.GetZeroGasAndFees() deltaAccumulatedFees := big.NewInt(0).Sub(finalGasAndFees.AccumulatedFees, initialGasAndFees.AccumulatedFees) if deltaAccumulatedFees.Cmp(zero) < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial accumulatedFees", initialGasAndFees.AccumulatedFees.String(), "final accumulatedFees", finalGasAndFees.AccumulatedFees.String(), "error", process.ErrNegativeValue) @@ -2048,7 +2049,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaDevFees := big.NewInt(0).Sub(finalGasAndFees.DeveloperFees, initialGasAndFees.DeveloperFees) if deltaDevFees.Cmp(zero) < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial devFees", initialGasAndFees.DeveloperFees.String(), "final devFees", finalGasAndFees.DeveloperFees.String(), "error", process.ErrNegativeValue) @@ -2057,7 +2058,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasProvided := 
int64(finalGasAndFees.GasProvided) - int64(initialGasAndFees.GasProvided) if deltaGasProvided < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasProvided", initialGasAndFees.GasProvided, "final gasProvided", finalGasAndFees.GasProvided, "error", process.ErrNegativeValue) @@ -2066,7 +2067,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasPenalized := int64(finalGasAndFees.GasPenalized) - int64(initialGasAndFees.GasPenalized) if deltaGasPenalized < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasPenalized", initialGasAndFees.GasPenalized, "final gasPenalized", finalGasAndFees.GasPenalized, "error", process.ErrNegativeValue) @@ -2075,7 +2076,7 @@ func gasAndFeesDelta(initialGasAndFees, finalGasAndFees scheduled.GasAndFees) sc deltaGasRefunded := int64(finalGasAndFees.GasRefunded) - int64(initialGasAndFees.GasRefunded) if deltaGasRefunded < 0 { - log.Error("gasAndFeesDelta", + bp.log.Error("gasAndFeesDelta", "initial gasRefunded", initialGasAndFees.GasRefunded, "final gasRefunded", finalGasAndFees.GasRefunded, "error", process.ErrNegativeValue) @@ -2098,7 +2099,7 @@ func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.Header for index, miniBlockHeaderHandler := range header.GetMiniBlockHeaderHandlers() { if miniBlockHeaderHandler.GetProcessingType() == int32(block.Processed) { - log.Debug("baseProcessor.getIndexOfFirstMiniBlockToBeExecuted: mini block is already executed", + bp.log.Debug("baseProcessor.getIndexOfFirstMiniBlockToBeExecuted: mini block is already executed", "mb hash", miniBlockHeaderHandler.GetHash(), "mb index", index) continue @@ -2110,7 +2111,7 @@ func (bp *baseProcessor) getIndexOfFirstMiniBlockToBeExecuted(header data.Header return len(header.GetMiniBlockHeaderHandlers()) } -func displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFinal uint64, err error) { +func (bp *baseProcessor) 
displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFinal uint64, err error) { // 2 blocks on shard + 2 blocks on meta + 1 block to previous final maxNoncesToPrevFinalWithoutWarn := uint64(process.BlockFinality+1)*2 + 1 level := logger.LogWarning @@ -2118,7 +2119,7 @@ func displayCleanupErrorMessage(message string, shardID uint32, noncesToPrevFina level = logger.LogDebug } - log.Log(level, message, + bp.log.Log(level, message, "shard", shardID, "nonces to previous final", noncesToPrevFinal, "error", err.Error()) diff --git a/process/block/displayBlock.go b/process/block/displayBlock.go index 3b1ab7410cc..54664482c47 100644 --- a/process/block/displayBlock.go +++ b/process/block/displayBlock.go @@ -20,6 +20,7 @@ import ( ) type transactionCounter struct { + log logger.Logger mutex sync.RWMutex currentBlockTxs uint64 totalTxs uint64 @@ -31,6 +32,7 @@ type transactionCounter struct { // ArgsTransactionCounter represents the arguments needed to create a new transaction counter type ArgsTransactionCounter struct { + Logger logger.Logger AppStatusHandler core.AppStatusHandler Hasher hashing.Hasher Marshalizer marshal.Marshalizer @@ -50,7 +52,14 @@ func NewTransactionCounter(args ArgsTransactionCounter) (*transactionCounter, er return nil, process.ErrNilMarshalizer } + var log logger.Logger + log = logger.GetOrCreate("process/block") + if args.Logger != nil { + log = args.Logger + } + return &transactionCounter{ + log: log, mutex: sync.RWMutex{}, appStatusHandler: args.AppStatusHandler, currentBlockTxs: 0, @@ -71,7 +80,7 @@ func (txc *transactionCounter) getPoolCounts(poolsHolder dataRetriever.PoolsHold // headerReverted updates the total processed txs in case of restore. 
It also sets the current block txs to 0 func (txc *transactionCounter) headerReverted(hdr data.HeaderHandler) { if check.IfNil(hdr) { - log.Warn("programming error: nil header in transactionCounter.headerReverted function") + txc.log.Warn("programming error: nil header in transactionCounter.headerReverted function") return } @@ -95,7 +104,7 @@ func (txc *transactionCounter) safeSubtractTotalTxs(delta uint64) { func (txc *transactionCounter) headerExecuted(hdr data.HeaderHandler) { if check.IfNil(hdr) { - log.Warn("programming error: nil header in transactionCounter.headerExecuted function") + txc.log.Warn("programming error: nil header in transactionCounter.headerExecuted function") return } @@ -141,7 +150,7 @@ func (txc *transactionCounter) displayLogInfo( tblString, err := display.CreateTableString(dispHeader, dispLines) if err != nil { - log.Debug("CreateTableString", "error", err.Error()) + txc.log.Debug("CreateTableString", "error", err.Error()) return } @@ -154,7 +163,7 @@ func (txc *transactionCounter) displayLogInfo( "shard", selfId, } txc.mutex.RUnlock() - log.Debug(message, arguments...) + txc.log.Debug(message, arguments...) 
blockTracker.DisplayTrackedHeaders() } @@ -330,6 +339,7 @@ func getConstructionStateAsString(miniBlockHeader data.MiniBlockHeaderHandler) s // DisplayLastNotarized will display information about last notarized block func DisplayLastNotarized( + log logger.Logger, marshalizer marshal.Marshalizer, hasher hashing.Hasher, lastNotarizedHdrForShard data.HeaderHandler, diff --git a/process/block/displayMetaBlock.go b/process/block/displayMetaBlock.go index 2018b819925..2c03af8ce36 100644 --- a/process/block/displayMetaBlock.go +++ b/process/block/displayMetaBlock.go @@ -9,7 +9,7 @@ import ( "github.com/multiversx/mx-chain-core-go/data/block" "github.com/multiversx/mx-chain-core-go/display" "github.com/multiversx/mx-chain-go/process" - "github.com/multiversx/mx-chain-logger-go" + logger "github.com/multiversx/mx-chain-logger-go" ) type transactionCountersProvider interface { @@ -19,6 +19,7 @@ type transactionCountersProvider interface { } type headersCounter struct { + log logger.Logger shardMBHeaderCounterMutex sync.RWMutex shardMBHeadersCurrentBlockProcessed uint64 shardMBHeadersTotalProcessed uint64 @@ -26,8 +27,9 @@ type headersCounter struct { // NewHeaderCounter returns a new object that keeps track of how many headers // were processed in total, and in the current block -func NewHeaderCounter() *headersCounter { +func NewHeaderCounter(log logger.Logger) *headersCounter { return &headersCounter{ + log: log, shardMBHeaderCounterMutex: sync.RWMutex{}, shardMBHeadersCurrentBlockProcessed: 0, shardMBHeadersTotalProcessed: 0, @@ -72,7 +74,7 @@ func (hc *headersCounter) displayLogInfo( blockTracker process.BlockTracker, ) { if check.IfNil(countersProvider) { - log.Warn("programming error in headersCounter.displayLogInfo - nil countersProvider") + hc.log.Warn("programming error in headersCounter.displayLogInfo - nil countersProvider") return } @@ -83,7 +85,7 @@ func (hc *headersCounter) displayLogInfo( tblString, err := display.CreateTableString(dispHeader, dispLines) if 
err != nil { - log.Debug("CreateTableString", "error", err.Error()) + hc.log.Debug("CreateTableString", "error", err.Error()) return } @@ -96,9 +98,9 @@ func (hc *headersCounter) displayLogInfo( } hc.shardMBHeaderCounterMutex.RUnlock() - log.Debug(message, arguments...) + hc.log.Debug(message, arguments...) - log.Debug("metablock metrics info", + hc.log.Debug("metablock metrics info", "total txs processed", countersProvider.TotalTxs(), "block txs processed", countersProvider.CurrentBlockTxs(), "hash", headerHash, diff --git a/process/block/headerValidator.go b/process/block/headerValidator.go index 9459280c847..2d90892272b 100644 --- a/process/block/headerValidator.go +++ b/process/block/headerValidator.go @@ -9,17 +9,20 @@ import ( "github.com/multiversx/mx-chain-core-go/hashing" "github.com/multiversx/mx-chain-core-go/marshal" "github.com/multiversx/mx-chain-go/process" + logger "github.com/multiversx/mx-chain-logger-go" ) var _ process.HeaderConstructionValidator = (*headerValidator)(nil) // ArgsHeaderValidator are the arguments needed to create a new header validator type ArgsHeaderValidator struct { + Logger logger.Logger Hasher hashing.Hasher Marshalizer marshal.Marshalizer } type headerValidator struct { + log logger.Logger hasher hashing.Hasher marshalizer marshal.Marshalizer } @@ -33,7 +36,14 @@ func NewHeaderValidator(args ArgsHeaderValidator) (*headerValidator, error) { return nil, process.ErrNilMarshalizer } + var log logger.Logger + log = logger.GetOrCreate("process/block") + if args.Logger != nil { + log = args.Logger + } + return &headerValidator{ + log: log, hasher: args.Hasher, marshalizer: args.Marshalizer, }, nil @@ -49,7 +59,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. 
} if prevHeader.GetRound() >= currHeader.GetRound() { - log.Trace("round does not match", + h.log.Trace("round does not match", "shard", currHeader.GetShardID(), "local header round", prevHeader.GetRound(), "received round", currHeader.GetRound()) @@ -57,7 +67,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. } if currHeader.GetNonce() != prevHeader.GetNonce()+1 { - log.Trace("nonce does not match", + h.log.Trace("nonce does not match", "shard", currHeader.GetShardID(), "local header nonce", prevHeader.GetNonce(), "received nonce", currHeader.GetNonce()) @@ -70,7 +80,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. } if !bytes.Equal(currHeader.GetPrevHash(), prevHeaderHash) { - log.Trace("header hash does not match", + h.log.Trace("header hash does not match", "shard", currHeader.GetShardID(), "local header hash", prevHeaderHash, "received header with prev hash", currHeader.GetPrevHash(), @@ -79,7 +89,7 @@ func (h *headerValidator) IsHeaderConstructionValid(currHeader, prevHeader data. 
} if !bytes.Equal(currHeader.GetPrevRandSeed(), prevHeader.GetRandSeed()) { - log.Trace("header random seed does not match", + h.log.Trace("header random seed does not match", "shard", currHeader.GetShardID(), "local header random seed", prevHeader.GetRandSeed(), "received header with prev random seed", currHeader.GetPrevRandSeed(), diff --git a/process/block/metablock.go b/process/block/metablock.go index 04220d9936d..37f2c94cf7d 100644 --- a/process/block/metablock.go +++ b/process/block/metablock.go @@ -94,7 +94,15 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } genesisHdr := arguments.DataComponents.Blockchain().GetGenesisHeader() + + var log logger.Logger + log = logger.GetOrCreate("process/block") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseProcessor{ + log: log, accountsDB: arguments.AccountsDB, blockSizeThrottler: arguments.BlockSizeThrottler, forkDetector: arguments.ForkDetector, @@ -146,7 +154,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { mp := metaProcessor{ baseProcessor: base, - headersCounter: NewHeaderCounter(), + headersCounter: NewHeaderCounter(log), scToProtocol: arguments.SCToProtocol, pendingMiniBlocksHandler: arguments.PendingMiniBlocksHandler, epochStartDataCreator: arguments.EpochStartDataCreator, @@ -158,6 +166,7 @@ func NewMetaProcessor(arguments ArgMetaProcessor) (*metaProcessor, error) { } argsTransactionCounter := ArgsTransactionCounter{ + Logger: log, AppStatusHandler: mp.appStatusHandler, Hasher: mp.hasher, Marshalizer: mp.marshalizer, @@ -205,7 +214,7 @@ func (mp *metaProcessor) ProcessBlock( err := mp.checkBlockValidity(headerHandler, bodyHandler) if err != nil { if errors.Is(err, process.ErrBlockHashDoesNotMatch) { - log.Debug("requested missing meta header", + mp.log.Debug("requested missing meta header", "hash", headerHandler.GetPrevHash(), "for shard", headerHandler.GetShardID(), ) @@ -220,7 +229,7 @@ func (mp *metaProcessor) 
ProcessBlock( mp.epochNotifier.CheckEpoch(headerHandler) mp.requestHandler.SetEpoch(headerHandler.GetEpoch()) - log.Debug("started processing block", + mp.log.Debug("started processing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -253,9 +262,9 @@ func (mp *metaProcessor) ProcessBlock( } txCounts, rewardCounts, unsignedCounts := mp.txCounter.getPoolCounts(mp.dataPool) - log.Debug("total txs in pool", "counts", txCounts.String()) - log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) - log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) + mp.log.Debug("total txs in pool", "counts", txCounts.String()) + mp.log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) + mp.log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) go getMetricsFromMetaHeader( header, @@ -285,7 +294,7 @@ func (mp *metaProcessor) ProcessBlock( } if mp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("metaProcessor.ProcessBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + mp.log.Error("metaProcessor.ProcessBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return process.ErrAccountStateDirty } @@ -314,12 +323,12 @@ func (mp *metaProcessor) ProcessBlock( haveMissingShardHeaders := requestedShardHdrs > 0 || requestedFinalityAttestingShardHdrs > 0 if haveMissingShardHeaders { if requestedShardHdrs > 0 { - log.Debug("requested missing shard headers", + mp.log.Debug("requested missing shard headers", "num headers", requestedShardHdrs, ) } if requestedFinalityAttestingShardHdrs > 0 { - log.Debug("requested missing finality attesting shard headers", + mp.log.Debug("requested missing finality attesting shard headers", "num finality shard headers", requestedFinalityAttestingShardHdrs, ) } @@ -333,7 +342,7 @@ func (mp *metaProcessor) 
ProcessBlock( mp.hdrsForCurrBlock.resetMissingHdrs() if requestedShardHdrs > 0 { - log.Debug("received missing shard headers", + mp.log.Debug("received missing shard headers", "num headers", requestedShardHdrs-missingShardHdrs, ) } @@ -378,7 +387,7 @@ func (mp *metaProcessor) ProcessBlock( startTime := time.Now() err = mp.txCoordinator.ProcessBlockTransaction(header, &block.Body{MiniBlocks: miniBlocks}, haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to process block transaction", + mp.log.Debug("elapsed time to process block transaction", "time [s]", elapsedTime, ) if err != nil { @@ -428,7 +437,7 @@ func (mp *metaProcessor) checkProofsForShardData(header *block.MetaBlock) error // compare the one from proofsPool with what shardData.CurrentSignature and shardData.CurrentPubKeysBitmap hold // if they are different, verify the proof received on header if !mp.proofsPool.HasProof(shardData.ShardID, shardData.HeaderHash) { - return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) + return fmt.Errorf("%w for shard header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(shardData.HeaderHash)) } } @@ -542,7 +551,7 @@ func (mp *metaProcessor) checkEpochCorrectness( isEpochIncorrect := headerHandler.GetEpoch() != currentBlockHeader.GetEpoch() && mp.epochStartTrigger.Epoch() == currentBlockHeader.GetEpoch() if isEpochIncorrect { - log.Warn("epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) + mp.log.Warn("epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) return process.ErrEpochDoesNotMatch } @@ -550,7 +559,7 @@ func (mp *metaProcessor) checkEpochCorrectness( mp.epochStartTrigger.EpochStartRound() <= headerHandler.GetRound() && 
headerHandler.GetEpoch() != currentBlockHeader.GetEpoch()+1 if isEpochIncorrect { - log.Warn("is epoch start and epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) + mp.log.Warn("is epoch start and epoch does not match", "currentHeaderEpoch", currentBlockHeader.GetEpoch(), "receivedHeaderEpoch", headerHandler.GetEpoch(), "epochStartTrigger", mp.epochStartTrigger.Epoch()) return process.ErrEpochDoesNotMatch } @@ -630,7 +639,7 @@ func (mp *metaProcessor) checkAndRequestIfShardHeadersMissing() { for i := uint32(0); i < mp.shardCoordinator.NumberOfShards(); i++ { err := mp.requestHeadersIfMissing(orderedHdrsPerShard[i], i) if err != nil { - log.Debug("checkAndRequestIfShardHeadersMissing", "error", err.Error()) + mp.log.Debug("checkAndRequestIfShardHeadersMissing", "error", err.Error()) continue } } @@ -648,7 +657,7 @@ func (mp *metaProcessor) indexBlock( return } - log.Debug("preparing to index block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) + mp.log.Debug("preparing to index block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) argSaveBlock, err := mp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ HeaderHash: headerHash, Header: metaBlock, @@ -660,18 +669,18 @@ func (mp *metaProcessor) indexBlock( HighestFinalBlockHash: mp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), + mp.log.Error("metaProcessor.indexBlock cannot prepare argSaveBlock", "error", err.Error(), "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } err = mp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, + 
mp.log.Error("metaProcessor.outportHandler.SaveBlock cannot save block", "error", err, "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) return } - log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) + mp.log.Debug("indexed block", "hash", headerHash, "nonce", metaBlock.GetNonce(), "round", metaBlock.GetRound()) indexRoundInfo(mp.outportHandler, mp.nodesCoordinator, core.MetachainShardId, metaBlock, lastMetaBlock, argSaveBlock.SignersIndexes) @@ -708,7 +717,7 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, for _, hdrHash := range hdrHashes { shardHeader, errNotCritical := process.GetShardHeaderFromStorage(hdrHash, mp.marshalizer, mp.store) if errNotCritical != nil { - log.Debug("shard header not found in BlockHeaderUnit", + mp.log.Debug("shard header not found in BlockHeaderUnit", "hash", hdrHash, ) continue @@ -719,14 +728,14 @@ func (mp *metaProcessor) RestoreBlockIntoPools(headerHandler data.HeaderHandler, hdrNonceHashDataUnit := dataRetriever.ShardHdrNonceHashDataUnit + dataRetriever.UnitType(shardHeader.GetShardID()) storer, errNotCritical := mp.store.GetStorer(hdrNonceHashDataUnit) if errNotCritical != nil { - log.Debug("storage unit not found", "unit", hdrNonceHashDataUnit, "error", errNotCritical.Error()) + mp.log.Debug("storage unit not found", "unit", hdrNonceHashDataUnit, "error", errNotCritical.Error()) continue } nonceToByteSlice := mp.uint64Converter.ToByteSlice(shardHeader.GetNonce()) errNotCritical = storer.Remove(nonceToByteSlice) if errNotCritical != nil { - log.Debug("ShardHdrNonceHashDataUnit.Remove", "error", errNotCritical.Error()) + mp.log.Debug("ShardHdrNonceHashDataUnit.Remove", "error", errNotCritical.Error()) } mp.headersCounter.subtractRestoredMBHeaders(len(shardHeader.GetMiniBlockHeaderHandlers())) @@ -763,7 +772,7 @@ func (mp *metaProcessor) CreateBlock( var body data.BodyHandler if 
mp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("metaProcessor.CreateBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + mp.log.Error("metaProcessor.CreateBlock first entry", "stack", string(mp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return nil, nil, process.ErrAccountStateDirty } @@ -832,7 +841,7 @@ func (mp *metaProcessor) updateEpochStartHeader(metaHdr *block.MetaBlock) error sw.Start("createEpochStartForMetablock") defer func() { sw.Stop("createEpochStartForMetablock") - log.Debug("epochStartHeaderDataCreation", sw.GetMeasurements()...) + mp.log.Debug("epochStartHeaderDataCreation", sw.GetMeasurements()...) }() epochStart, err := mp.epochStartDataCreator.CreateEpochStartData() @@ -874,7 +883,7 @@ func (mp *metaProcessor) createEpochStartBody(metaBlock *block.MetaBlock) (data. return nil, err } - log.Debug("started creating epoch start block body", + mp.log.Debug("started creating epoch start block body", "epoch", metaBlock.GetEpoch(), "round", metaBlock.GetRound(), "nonce", metaBlock.GetNonce(), @@ -951,7 +960,7 @@ func (mp *metaProcessor) createBlockBody(metaBlock data.HeaderHandler, haveTime mp.blockSizeThrottler.ComputeCurrentMaxSize() - log.Debug("started creating meta block body", + mp.log.Debug("started creating meta block body", "epoch", metaBlock.GetEpoch(), "round", metaBlock.GetRound(), "nonce", metaBlock.GetNonce(), @@ -984,26 +993,26 @@ func (mp *metaProcessor) createMiniBlocks( } if !haveTime() { - log.Debug("metaProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) + mp.log.Debug("metaProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) interMBs := mp.txCoordinator.CreatePostProcessMiniBlocks() if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) 
} - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + mp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, nil } mbsToMe, numTxs, numShardHeaders, err := mp.createAndProcessCrossMiniBlocksDstMe(haveTime) if err != nil { - log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) + mp.log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } if len(mbsToMe) > 0 { miniBlocks = append(miniBlocks, mbsToMe...) - log.Debug("processed miniblocks and txs with destination in self shard", + mp.log.Debug("processed miniblocks and txs with destination in self shard", "num miniblocks", len(mbsToMe), "num txs", numTxs, "num shard headers", numShardHeaders, @@ -1019,13 +1028,13 @@ func (mp *metaProcessor) createMiniBlocks( numTxs += uint32(len(mb.TxHashes)) } - log.Debug("processed miniblocks and txs from self shard", + mp.log.Debug("processed miniblocks and txs from self shard", "num miniblocks", len(mbsFromMe), "num txs", numTxs, ) } - log.Debug("creating mini blocks has been finished", + mp.log.Debug("creating mini blocks has been finished", "miniblocks created", len(miniBlocks), ) @@ -1049,12 +1058,12 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( sw.Start("ComputeLongestShardsChainsFromLastNotarized") orderedHdrs, orderedHdrsHashes, _, err := mp.blockTracker.ComputeLongestShardsChainsFromLastNotarized() sw.Stop("ComputeLongestShardsChainsFromLastNotarized") - log.Debug("measurements ComputeLongestShardsChainsFromLastNotarized", sw.GetMeasurements()...) + mp.log.Debug("measurements ComputeLongestShardsChainsFromLastNotarized", sw.GetMeasurements()...) 
if err != nil { return nil, 0, 0, err } - log.Debug("shard headers ordered", + mp.log.Debug("shard headers ordered", "num shard headers", len(orderedHdrs), ) @@ -1076,14 +1085,14 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( mp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedHdrs); i++ { if !haveTime() { - log.Debug("time is up after putting cross txs with destination to current shard", + mp.log.Debug("time is up after putting cross txs with destination to current shard", "num txs", txsAdded, ) break } if hdrsAdded >= maxShardHeadersAllowedInOneMetaBlock { - log.Debug("maximum shard headers allowed to be included in one meta block has been reached", + mp.log.Debug("maximum shard headers allowed to be included in one meta block has been reached", "shard headers added", hdrsAdded, ) break @@ -1091,7 +1100,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( currShardHdr := orderedHdrs[i] if currShardHdr.GetNonce() > lastShardHdr[currShardHdr.GetShardID()].GetNonce()+1 { - log.Trace("skip searching", + mp.log.Trace("skip searching", "shard", currShardHdr.GetShardID(), "last shard hdr nonce", lastShardHdr[currShardHdr.GetShardID()].GetNonce(), "curr shard hdr nonce", currShardHdr.GetNonce()) @@ -1099,7 +1108,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( } if hdrsAddedForShard[currShardHdr.GetShardID()] >= maxShardHeadersFromSameShard { - log.Trace("maximum shard headers from same shard allowed to be included in one meta block has been reached", + mp.log.Trace("maximum shard headers from same shard allowed to be included in one meta block has been reached", "shard", currShardHdr.GetShardID(), "shard headers added", hdrsAddedForShard[currShardHdr.GetShardID()], ) @@ -1110,7 +1119,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( if shouldCheckProof { hasProofForHdr := mp.proofsPool.HasProof(currShardHdr.GetShardID(), orderedHdrsHashes[i]) if !hasProofForHdr { - log.Trace("no 
proof for shard header", + mp.log.Trace("no proof for shard header", "shard", currShardHdr.GetShardID(), "hash", logger.DisplayByteSlice(orderedHdrsHashes[i]), ) @@ -1143,7 +1152,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( } if !hdrProcessFinished { - log.Debug("shard header cannot be fully processed", + mp.log.Debug("shard header cannot be fully processed", "round", currShardHdr.GetRound(), "nonce", currShardHdr.GetNonce(), "hash", orderedHdrsHashes[i]) @@ -1152,7 +1161,7 @@ func (mp *metaProcessor) createAndProcessCrossMiniBlocksDstMe( errAccountState := mp.accountsDB[state.UserAccountsState].RevertToSnapshot(snapshot) if errAccountState != nil { // TODO: evaluate if reloading the trie from disk will might solve the problem - log.Warn("accounts.RevertToSnapshot", "error", errAccountState.Error()) + mp.log.Warn("accounts.RevertToSnapshot", "error", errAccountState.Error()) } continue } @@ -1182,7 +1191,7 @@ func (mp *metaProcessor) requestShardHeadersIfNeeded( lastShardHdr map[uint32]data.HeaderHandler, ) { for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { - log.Debug("shard headers added", + mp.log.Debug("shard headers added", "shard", shardID, "num", hdrsAddedForShard[shardID], "highest nonce", lastShardHdr[shardID].GetNonce()) @@ -1219,7 +1228,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Debug("started committing block", + mp.log.Debug("started committing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -1275,7 +1284,7 @@ func (mp *metaProcessor) CommitBlock( return err } - log.Info("meta block has been committed successfully", + mp.log.Info("meta block has been committed successfully", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -1286,17 +1295,17 @@ func (mp *metaProcessor) CommitBlock( errNotCritical := 
mp.checkSentSignaturesAtCommitTime(headerHandler) if errNotCritical != nil { - log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + mp.log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) } notarizedHeadersHashes, errNotCritical := mp.updateCrossShardInfo(header) if errNotCritical != nil { - log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) + mp.log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } errNotCritical = mp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, nil, nil) if errNotCritical != nil { - log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) + mp.log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } currentHeader, currentHeaderHash := getLastSelfNotarizedHeaderByItself(mp.blockChain) @@ -1309,7 +1318,7 @@ func (mp *metaProcessor) CommitBlock( go mp.historyRepo.OnNotarizedBlocks(mp.shardCoordinator.SelfId(), []data.HeaderHandler{currentHeader}, [][]byte{currentHeaderHash}) - log.Debug("highest final meta block", + mp.log.Debug("highest final meta block", "nonce", mp.forkDetector.GetHighestFinalBlockNonce(), ) @@ -1317,9 +1326,9 @@ func (mp *metaProcessor) CommitBlock( lastMetaBlock, ok := lastHeader.(data.MetaHeaderHandler) if !ok { if headerHandler.GetNonce() == firstHeaderNonce { - log.Debug("metaBlock.CommitBlock - nil current block header, this is expected at genesis time") + mp.log.Debug("metaBlock.CommitBlock - nil current block header, this is expected at genesis time") } else { - log.Error("metaBlock.CommitBlock - nil current block header, last current header should have not been nil") + mp.log.Error("metaBlock.CommitBlock - nil current block header, last current header should have not been nil") } } lastMetaBlockHash := mp.blockChain.GetCurrentBlockHeaderHash() @@ -1411,7 +1420,7 @@ func (mp *metaProcessor) CommitBlock( errNotCritical = mp.removeTxsFromPools(header, body) if errNotCritical != nil { - 
log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) + mp.log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) } mp.cleanupPools(headerHandler) @@ -1461,12 +1470,12 @@ func (mp *metaProcessor) displayPoolsInfo() { miniBlocksPool := mp.dataPool.MiniBlocks() for shardID := uint32(0); shardID < mp.shardCoordinator.NumberOfShards(); shardID++ { - log.Trace("pools info", + mp.log.Trace("pools info", "shard", shardID, "num headers", headersPool.GetNumHeaders(shardID)) } - log.Trace("pools info", + mp.log.Trace("pools info", "shard", core.MetachainShardId, "num headers", headersPool.GetNumHeaders(core.MetachainShardId)) @@ -1474,7 +1483,7 @@ func (mp *metaProcessor) displayPoolsInfo() { // (in this case this number is equal with: number of shards + metachain (self shard)) numShardsToKeepHeaders := int(mp.shardCoordinator.NumberOfShards()) + 1 capacity := headersPool.MaxSize() * numShardsToKeepHeaders - log.Debug("pools info", + mp.log.Debug("pools info", "total headers", headersPool.Len(), "headers pool capacity", capacity, "total miniblocks", miniBlocksPool.Len(), @@ -1486,7 +1495,7 @@ func (mp *metaProcessor) displayPoolsInfo() { func (mp *metaProcessor) updateState(lastMetaBlock data.MetaHeaderHandler, lastMetaBlockHash []byte) { if check.IfNil(lastMetaBlock) { - log.Debug("updateState nil header") + mp.log.Debug("updateState nil header") return } @@ -1500,12 +1509,12 @@ func (mp *metaProcessor) updateState(lastMetaBlock data.MetaHeaderHandler, lastM mp.store, ) if errNotCritical != nil { - log.Debug("could not get meta header from storage") + mp.log.Debug("could not get meta header from storage") return } if lastMetaBlock.IsStartOfEpochBlock() { - log.Debug("trie snapshot", + mp.log.Debug("trie snapshot", "rootHash", lastMetaBlock.GetRootHash(), "prevRootHash", prevMetaBlock.GetRootHash(), "validatorStatsRootHash", lastMetaBlock.GetValidatorStatsRootHash()) @@ -1514,12 +1523,12 @@ func (mp *metaProcessor) updateState(lastMetaBlock 
data.MetaHeaderHandler, lastM go func() { metaBlock, ok := lastMetaBlock.(*block.MetaBlock) if !ok { - log.Warn("cannot commit Trie Epoch Root Hash: lastMetaBlock is not *block.MetaBlock") + mp.log.Warn("cannot commit Trie Epoch Root Hash: lastMetaBlock is not *block.MetaBlock") return } err := mp.commitTrieEpochRootHashIfNeeded(metaBlock, lastMetaBlock.GetRootHash()) if err != nil { - log.Warn("couldn't commit trie checkpoint", "epoch", metaBlock.Epoch, "error", err) + mp.log.Warn("couldn't commit trie checkpoint", "epoch", metaBlock.Epoch, "error", err) } }() } @@ -1549,7 +1558,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( lastNotarizedMetaHeader, lastNotarizedMetaHeaderHash, err := mp.blockTracker.GetLastSelfNotarizedHeader(shardID) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByShard.GetLastSelfNotarizedHeader", + mp.log.Warn("getLastSelfNotarizedHeaderByShard.GetLastSelfNotarizedHeader", "shard", shardID, "error", err.Error()) return nil, nil @@ -1565,7 +1574,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( headerInfo, ok := mp.hdrsForCurrBlock.hdrHashAndInfo[string(shardData.HeaderHash)] mp.hdrsForCurrBlock.mutHdrsForBlock.RUnlock() if !ok { - log.Debug("getLastSelfNotarizedHeaderByShard", + mp.log.Debug("getLastSelfNotarizedHeaderByShard", "error", process.ErrMissingHeader, "hash", shardData.HeaderHash) continue @@ -1573,7 +1582,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( shardHeader, ok := headerInfo.hdr.(data.ShardHeaderHandler) if !ok { - log.Debug("getLastSelfNotarizedHeaderByShard", + mp.log.Debug("getLastSelfNotarizedHeaderByShard", "error", process.ErrWrongTypeAssertion, "hash", shardData.HeaderHash) continue @@ -1587,7 +1596,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( mp.store, ) if errGet != nil { - log.Trace("getLastSelfNotarizedHeaderByShard.GetMetaHeader", "error", errGet.Error()) + mp.log.Trace("getLastSelfNotarizedHeaderByShard.GetMetaHeader", "error", 
errGet.Error()) continue } @@ -1600,7 +1609,7 @@ func (mp *metaProcessor) getLastSelfNotarizedHeaderByShard( } if lastNotarizedMetaHeader != nil { - log.Debug("last notarized meta header in shard", + mp.log.Debug("last notarized meta header in shard", "shard", shardID, "epoch", lastNotarizedMetaHeader.GetEpoch(), "round", lastNotarizedMetaHeader.GetRound(), @@ -1644,7 +1653,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) err := mp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { - log.Debug("recreate trie with error for header", + mp.log.Debug("recreate trie with error for header", "nonce", header.GetNonce(), "header root hash", header.GetRootHash(), "given root hash", rootHash, @@ -1661,7 +1670,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = mp.validatorStatisticsProcessor.RevertPeerState(metaHeader) if err != nil { - log.Debug("revert peer state with error for header", + mp.log.Debug("revert peer state with error for header", "nonce", metaHeader.GetNonce(), "validators root hash", metaHeader.GetValidatorStatsRootHash(), "error", err.Error(), @@ -1672,7 +1681,7 @@ func (mp *metaProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = mp.epochStartTrigger.RevertStateToBlock(metaHeader) if err != nil { - log.Debug("revert epoch start trigger for header", + mp.log.Debug("revert epoch start trigger for header", "nonce", metaHeader.GetNonce(), "error", err, ) @@ -1759,7 +1768,7 @@ func (mp *metaProcessor) saveLastNotarizedHeader(header *block.MetaBlock) error hdr := lastCrossNotarizedHeaderForShard[shardID].hdr hash := lastCrossNotarizedHeaderForShard[shardID].hash mp.blockTracker.AddCrossNotarizedHeader(shardID, hdr, hash) - DisplayLastNotarized(mp.marshalizer, mp.hasher, hdr, shardID) + DisplayLastNotarized(mp.log, mp.marshalizer, mp.hasher, hdr, shardID) } return nil @@ -1776,7 
+1785,7 @@ func (mp *metaProcessor) getLastCrossNotarizedShardHdrs() (map[uint32]data.Heade return nil, err } - log.Debug("lastCrossNotarizedHeader for shard", "shardID", shardID, "hash", hash) + mp.log.Debug("lastCrossNotarizedHeader for shard", "shardID", shardID, "hash", hash) lastCrossNotarizedHeader[shardID] = lastCrossNotarizedHeaderForShard usedInBlock := mp.isGenesisShardBlockAndFirstMeta(lastCrossNotarizedHeaderForShard.GetNonce()) mp.hdrsForCurrBlock.hdrHashAndInfo[string(hash)] = &hdrInfo{ @@ -1893,7 +1902,7 @@ func (mp *metaProcessor) getFinalMiniBlockHeaders(miniBlockHeaderHandlers []data miniBlockHeaders := make([]data.MiniBlockHeaderHandler, 0) for _, miniBlockHeader := range miniBlockHeaderHandlers { if !miniBlockHeader.IsFinal() { - log.Debug("metaProcessor.getFinalMiniBlockHeaders: do not check validity for mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + mp.log.Debug("metaProcessor.getFinalMiniBlockHeaders: do not check validity for mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } @@ -1950,7 +1959,7 @@ func (mp *metaProcessor) checkShardHeadersFinality( if shardHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := mp.headerValidator.IsHeaderConstructionValid(shardHdr, lastVerifiedHdr) if err != nil { - log.Debug("checkShardHeadersFinality -> isHdrConstructionValid", + mp.log.Debug("checkShardHeadersFinality -> isHdrConstructionValid", "error", err.Error()) continue } @@ -1996,7 +2005,7 @@ func (mp *metaProcessor) receivedShardHeader(headerHandler data.HeaderHandler, s return } - log.Trace("received shard header from network", + mp.log.Trace("received shard header from network", "shard", shardHeader.GetShardID(), "round", shardHeader.GetRound(), "nonce", shardHeader.GetNonce(), @@ -2023,7 +2032,7 @@ func (mp *metaProcessor) receivedShardHeader(headerHandler data.HeaderHandler, s if mp.hdrsForCurrBlock.missingHdrs == 0 && !shouldConsiderProofsForNotarization { 
mp.hdrsForCurrBlock.missingFinalityAttestingHdrs = mp.requestMissingFinalityAttestingShardHeaders() if mp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { - log.Debug("received all missing finality attesting shard headers") + mp.log.Debug("received all missing finality attesting shard headers") } } @@ -2080,11 +2089,11 @@ func (mp *metaProcessor) computeExistingAndRequestMissingShardHeaders(metaBlock if shardData.Nonce == mp.genesisNonce { lastCrossNotarizedHeaderForShard, hash, err := mp.blockTracker.GetLastCrossNotarizedHeader(shardData.ShardID) if err != nil { - log.Warn("computeExistingAndRequestMissingShardHeaders.GetLastCrossNotarizedHeader", "error", err.Error()) + mp.log.Warn("computeExistingAndRequestMissingShardHeaders.GetLastCrossNotarizedHeader", "error", err.Error()) continue } if !bytes.Equal(hash, shardData.HeaderHash) { - log.Warn("genesis hash missmatch", + mp.log.Warn("genesis hash missmatch", "last notarized nonce", lastCrossNotarizedHeaderForShard.GetNonce(), "last notarized hash", hash, "genesis nonce", mp.genesisNonce, @@ -2185,7 +2194,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { if mp.enableEpochsHandler.IsFlagEnabled(common.ScheduledMiniBlocksFlag) { miniBlockHeader := shardHdr.GetMiniBlockHeaderHandlers()[i] if !miniBlockHeader.IsFinal() { - log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) + mp.log.Debug("metaProcessor.createShardInfo: do not create shard data with mini block which is not final", "mb hash", miniBlockHeader.GetHash()) continue } } @@ -2203,7 +2212,7 @@ func (mp *metaProcessor) createShardInfo() ([]data.ShardDataHandler, error) { shardInfo = append(shardInfo, &shardData) } - log.Debug("created shard data", + mp.log.Debug("created shard data", "size", len(shardInfo), ) return shardInfo, nil @@ -2245,14 +2254,14 @@ func (mp *metaProcessor) computeAccumulatedFeesInEpoch(metaHdr data.MetaHeaderHa 
currentlyAccumulatedFeesInEpoch.Add(currentlyAccumulatedFeesInEpoch, metaHdr.GetAccumulatedFees()) currentDevFeesInEpoch.Add(currentDevFeesInEpoch, metaHdr.GetDeveloperFees()) - log.Debug("computeAccumulatedFeesInEpoch - meta block fees", + mp.log.Debug("computeAccumulatedFeesInEpoch - meta block fees", "meta nonce", metaHdr.GetNonce(), "accumulatedFees", metaHdr.GetAccumulatedFees().String(), "devFees", metaHdr.GetDeveloperFees().String(), "meta leader fees", core.GetIntTrimmedPercentageOfValue(big.NewInt(0).Sub(metaHdr.GetAccumulatedFees(), metaHdr.GetDeveloperFees()), mp.economicsData.LeaderPercentage()).String()) for _, shardData := range metaHdr.GetShardInfoHandlers() { - log.Debug("computeAccumulatedFeesInEpoch - adding shard data fees", + mp.log.Debug("computeAccumulatedFeesInEpoch - adding shard data fees", "shardHeader hash", shardData.GetHeaderHash(), "shardHeader nonce", shardData.GetNonce(), "shardHeader accumulated fees", shardData.GetAccumulatedFees().String(), @@ -2264,7 +2273,7 @@ func (mp *metaProcessor) computeAccumulatedFeesInEpoch(metaHdr data.MetaHeaderHa currentDevFeesInEpoch.Add(currentDevFeesInEpoch, shardData.GetDeveloperFees()) } - log.Debug("computeAccumulatedFeesInEpoch - fees in epoch", + mp.log.Debug("computeAccumulatedFeesInEpoch - fees in epoch", "accumulatedFeesInEpoch", currentlyAccumulatedFeesInEpoch.String(), "devFeesInEpoch", currentDevFeesInEpoch.String()) @@ -2278,7 +2287,7 @@ func (mp *metaProcessor) applyBodyToHeader(metaHdr data.MetaHeaderHandler, bodyH defer func() { sw.Stop("applyBodyToHeader") - log.Debug("measurements", sw.GetMeasurements()...) + mp.log.Debug("measurements", sw.GetMeasurements()...) 
}() if check.IfNil(bodyHandler) { @@ -2424,7 +2433,7 @@ func (mp *metaProcessor) verifyValidatorStatisticsRootHash(header *block.MetaBlo } if !bytes.Equal(validatorStatsRH, header.GetValidatorStatsRootHash()) { - log.Debug("validator stats root hash mismatch", + mp.log.Debug("validator stats root hash mismatch", "computed", validatorStatsRH, "received", header.GetValidatorStatsRootHash(), ) @@ -2560,7 +2569,7 @@ func (mp *metaProcessor) MarshalizedDataToBroadcast( for shardId, subsetBlockBody := range bodies { buff, err := mp.marshalizer.Marshal(&block.Body{MiniBlocks: subsetBlockBody}) if err != nil { - log.Error("metaProcessor.MarshalizedDataToBroadcast.Marshal", "error", err.Error()) + mp.log.Error("metaProcessor.MarshalizedDataToBroadcast.Marshal", "error", err.Error()) continue } mrsData[shardId] = buff @@ -2577,12 +2586,12 @@ func (mp *metaProcessor) getAllMarshalledTxs(body *block.Body) map[string][][]by for topic, marshalledTxs := range marshalledRewardsTxs { allMarshalledTxs[topic] = append(allMarshalledTxs[topic], marshalledTxs...) - log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num rewards txs", len(marshalledTxs)) + mp.log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num rewards txs", len(marshalledTxs)) } for topic, marshalledTxs := range marshalledValidatorInfoTxs { allMarshalledTxs[topic] = append(allMarshalledTxs[topic], marshalledTxs...) 
- log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num validator info txs", len(marshalledTxs)) + mp.log.Trace("metaProcessor.getAllMarshalledTxs", "topic", topic, "num validator info txs", len(marshalledTxs)) } return allMarshalledTxs @@ -2684,7 +2693,7 @@ func (mp *metaProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { metaBlock := &block.MetaBlock{} err := mp.marshalizer.Unmarshal(metaBlock, dta) if err != nil { - log.Debug("DecodeBlockHeader.Unmarshal", "error", err.Error()) + mp.log.Debug("DecodeBlockHeader.Unmarshal", "error", err.Error()) return nil } diff --git a/process/block/shardblock.go b/process/block/shardblock.go index d35ed73aa6b..6e317d23a34 100644 --- a/process/block/shardblock.go +++ b/process/block/shardblock.go @@ -80,7 +80,14 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { return nil, err } + var log logger.Logger + log = logger.GetOrCreate("process/block") + if arguments.Logger != nil { + log = arguments.Logger + } + base := &baseProcessor{ + log: log, accountsDB: arguments.AccountsDB, blockSizeThrottler: arguments.BlockSizeThrottler, forkDetector: arguments.ForkDetector, @@ -135,6 +142,7 @@ func NewShardProcessor(arguments ArgShardProcessor) (*shardProcessor, error) { } argsTransactionCounter := ArgsTransactionCounter{ + Logger: log, AppStatusHandler: sp.appStatusHandler, Hasher: sp.hasher, Marshalizer: sp.marshalizer, @@ -176,7 +184,7 @@ func (sp *shardProcessor) ProcessBlock( err := sp.checkBlockValidity(headerHandler, bodyHandler) if err != nil { if errors.Is(err, process.ErrBlockHashDoesNotMatch) { - log.Debug("requested missing shard header", + sp.log.Debug("requested missing shard header", "hash", headerHandler.GetPrevHash(), "for shard", headerHandler.GetShardID(), ) @@ -196,7 +204,7 @@ func (sp *shardProcessor) ProcessBlock( return err } - log.Debug("started processing block", + sp.log.Debug("started processing block", "epoch", headerHandler.GetEpoch(), "shard", 
headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -226,9 +234,9 @@ func (sp *shardProcessor) ProcessBlock( } txCounts, rewardCounts, unsignedCounts := sp.txCounter.getPoolCounts(sp.dataPool) - log.Debug("total txs in pool", "counts", txCounts.String()) - log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) - log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) + sp.log.Debug("total txs in pool", "counts", txCounts.String()) + sp.log.Debug("total txs in rewards pool", "counts", rewardCounts.String()) + sp.log.Debug("total txs in unsigned pool", "counts", unsignedCounts.String()) go getMetricsFromHeader(header, uint64(txCounts.GetTotal()), sp.marshalizer, sp.appStatusHandler) @@ -254,12 +262,12 @@ func (sp *shardProcessor) ProcessBlock( haveMissingMetaHeaders := requestedMetaHdrs > 0 || requestedFinalityAttestingMetaHdrs > 0 if haveMissingMetaHeaders { if requestedMetaHdrs > 0 { - log.Debug("requested missing meta headers", + sp.log.Debug("requested missing meta headers", "num headers", requestedMetaHdrs, ) } if requestedFinalityAttestingMetaHdrs > 0 { - log.Debug("requested missing finality attesting meta headers", + sp.log.Debug("requested missing finality attesting meta headers", "num finality meta headers", requestedFinalityAttestingMetaHdrs, ) } @@ -273,7 +281,7 @@ func (sp *shardProcessor) ProcessBlock( sp.hdrsForCurrBlock.resetMissingHdrs() if requestedMetaHdrs > 0 { - log.Debug("received missing meta headers", + sp.log.Debug("received missing meta headers", "num headers", requestedMetaHdrs-missingMetaHdrs, ) } @@ -289,7 +297,7 @@ func (sp *shardProcessor) ProcessBlock( } if sp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("shardProcessor.ProcessBlock first entry", "stack", string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) + sp.log.Error("shardProcessor.ProcessBlock first entry", "stack", 
string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) return process.ErrAccountStateDirty } @@ -299,6 +307,8 @@ func (sp *shardProcessor) ProcessBlock( if !sp.proofsPool.HasProof(core.MetachainShardId, metaBlockHash) { return fmt.Errorf("%w for header hash %s", process.ErrMissingHeaderProof, hex.EncodeToString(metaBlockHash)) } + + sp.log.Debug("checking cross notarized metablocks: has proof for meta header", "headerHash", metaBlockHash) } } @@ -338,7 +348,7 @@ func (sp *shardProcessor) ProcessBlock( startTime := time.Now() err = sp.txCoordinator.ProcessBlockTransaction(header, &block.Body{MiniBlocks: miniBlocks}, haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to process block transaction", + sp.log.Debug("elapsed time to process block transaction", "time [s]", elapsedTime, ) if err != nil { @@ -420,7 +430,7 @@ func (sp *shardProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash rootHashHolder := holders.NewDefaultRootHashesHolder(rootHash) err := sp.accountsDB[state.UserAccountsState].RecreateTrie(rootHashHolder) if err != nil { - log.Debug("recreate trie with error for header", + sp.log.Debug("recreate trie with error for header", "nonce", header.GetNonce(), "header root hash", header.GetRootHash(), "given root hash", rootHash, @@ -432,7 +442,7 @@ func (sp *shardProcessor) RevertStateToBlock(header data.HeaderHandler, rootHash err = sp.epochStartTrigger.RevertStateToBlock(header) if err != nil { - log.Debug("revert epoch start trigger for header", + sp.log.Debug("revert epoch start trigger for header", "nonce", header.GetNonce(), "error", err, ) @@ -493,7 +503,7 @@ func (sp *shardProcessor) checkEpochCorrectness( header.GetEpoch() == sp.epochStartTrigger.MetaEpoch() if isEpochStartMetaHashIncorrect { go sp.requestHandler.RequestMetaHeader(header.GetEpochStartMetaHash()) - log.Warn("epoch start meta hash mismatch", "proposed", header.GetEpochStartMetaHash(), "calculated", 
sp.epochStartTrigger.EpochStartMetaHdrHash()) + sp.log.Warn("epoch start meta hash mismatch", "proposed", header.GetEpochStartMetaHash(), "calculated", sp.epochStartTrigger.EpochStartMetaHdrHash()) return fmt.Errorf("%w proposed header with epoch %d has invalid epochStartMetaHash", process.ErrEpochDoesNotMatch, header.GetEpoch()) } @@ -536,7 +546,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { return err } - log.Trace("checkMetaHeadersValidityAndFinality", "lastCrossNotarizedHeader nonce", lastCrossNotarizedHeader.GetNonce()) + sp.log.Trace("checkMetaHeadersValidityAndFinality", "lastCrossNotarizedHeader nonce", lastCrossNotarizedHeader.GetNonce()) usedMetaHdrs, err := sp.sortHeadersForCurrentBlockByNonce(true) if err != nil { return err @@ -546,7 +556,7 @@ func (sp *shardProcessor) checkMetaHeadersValidityAndFinality() error { } for _, metaHdr := range usedMetaHdrs[core.MetachainShardId] { - log.Trace("checkMetaHeadersValidityAndFinality", "metaHeader nonce", metaHdr.GetNonce()) + sp.log.Trace("checkMetaHeadersValidityAndFinality", "metaHeader nonce", metaHdr.GetNonce()) err = sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastCrossNotarizedHeader) if err != nil { return fmt.Errorf("%w : checkMetaHeadersValidityAndFinality -> isHdrConstructionValid", err) @@ -600,7 +610,7 @@ func (sp *shardProcessor) checkMetaHdrFinality(header data.HeaderHandler) error if metaHdr.GetNonce() == lastVerifiedHdr.GetNonce()+1 { err := sp.headerValidator.IsHeaderConstructionValid(metaHdr, lastVerifiedHdr) if err != nil { - log.Debug("checkMetaHdrFinality -> isHdrConstructionValid", + sp.log.Debug("checkMetaHdrFinality -> isHdrConstructionValid", "error", err.Error()) continue } @@ -624,7 +634,7 @@ func (sp *shardProcessor) checkAndRequestIfMetaHeadersMissing() { err := sp.requestHeadersIfMissing(orderedMetaBlocks, core.MetachainShardId) if err != nil { - log.Debug("checkAndRequestIfMetaHeadersMissing", "error", err.Error()) + 
sp.log.Debug("checkAndRequestIfMetaHeadersMissing", "error", err.Error()) } } @@ -638,7 +648,7 @@ func (sp *shardProcessor) indexBlockIfNeeded( return } - log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) + sp.log.Debug("preparing to index block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) argSaveBlock, err := sp.outportDataProvider.PrepareOutportSaveBlockData(processOutport.ArgPrepareOutportSaveBlockData{ HeaderHash: headerHash, Header: header, @@ -648,18 +658,18 @@ func (sp *shardProcessor) indexBlockIfNeeded( HighestFinalBlockHash: sp.forkDetector.GetHighestFinalBlockHash(), }) if err != nil { - log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), + sp.log.Error("shardProcessor.indexBlockIfNeeded cannot prepare argSaveBlock", "error", err.Error(), "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } err = sp.outportHandler.SaveBlock(argSaveBlock) if err != nil { - log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, + sp.log.Error("shardProcessor.outportHandler.SaveBlock cannot save block", "error", err, "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) return } - log.Debug("indexed block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) + sp.log.Debug("indexed block", "hash", headerHash, "nonce", header.GetNonce(), "round", header.GetRound()) shardID := sp.shardCoordinator.SelfId() indexRoundInfo(sp.outportHandler, sp.nodesCoordinator, shardID, header, lastBlockHeader, argSaveBlock.SignersIndexes) @@ -702,7 +712,7 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( for _, metaBlockHash := range metaBlockHashes { metaBlock, errNotCritical := process.GetMetaHeaderFromStorage(metaBlockHash, sp.marshalizer, sp.store) if errNotCritical != nil { - log.Debug("meta block is not fully processed yet and not 
committed in MetaBlockUnit", + sp.log.Debug("meta block is not fully processed yet and not committed in MetaBlockUnit", "hash", metaBlockHash) continue } @@ -717,14 +727,14 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( metablockStorer, err := sp.store.GetStorer(dataRetriever.MetaBlockUnit) if err != nil { - log.Debug("unable to get storage unit", + sp.log.Debug("unable to get storage unit", "unit", dataRetriever.MetaBlockUnit.String()) return err } err = metablockStorer.Remove(metaBlockHash) if err != nil { - log.Debug("unable to remove hash from MetaBlockUnit", + sp.log.Debug("unable to remove hash from MetaBlockUnit", "hash", metaBlockHash) return err } @@ -733,18 +743,18 @@ func (sp *shardProcessor) restoreMetaBlockIntoPool( metaHdrNonceHashStorer, err := sp.store.GetStorer(dataRetriever.MetaHdrNonceHashDataUnit) if err != nil { - log.Debug("unable to get storage unit", + sp.log.Debug("unable to get storage unit", "unit", dataRetriever.MetaHdrNonceHashDataUnit.String()) return err } errNotCritical = metaHdrNonceHashStorer.Remove(nonceToByteSlice) if errNotCritical != nil { - log.Debug("error not critical", + sp.log.Debug("error not critical", "error", errNotCritical.Error()) } - log.Trace("meta block has been restored successfully", + sp.log.Trace("meta block has been restored successfully", "round", metaBlock.Round, "nonce", metaBlock.Nonce, "hash", metaBlockHash) @@ -797,7 +807,7 @@ func (sp *shardProcessor) rollBackProcessedMiniBlocksInfo(headerHandler data.Hea for miniBlockHash := range mapMiniBlockHashes { miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, []byte(miniBlockHash)) if miniBlockHeader == nil { - log.Warn("shardProcessor.rollBackProcessedMiniBlocksInfo: GetMiniBlockHeaderWithHash", + sp.log.Warn("shardProcessor.rollBackProcessedMiniBlocksInfo: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) continue @@ -820,7 +830,7 @@ func (sp *shardProcessor) 
rollBackProcessedMiniBlockInfo(miniBlockHeader data.Mi _, metaBlockHash := sp.processedMiniBlocksTracker.GetProcessedMiniBlockInfo(miniBlockHash) if metaBlockHash == nil { - log.Warn("shardProcessor.rollBackProcessedMiniBlockInfo: mini block was not found in ProcessedMiniBlockTracker component", + sp.log.Warn("shardProcessor.rollBackProcessedMiniBlockInfo: mini block was not found in ProcessedMiniBlockTracker component", "sender shard", miniBlockHeader.GetSenderShardID(), "receiver shard", miniBlockHeader.GetReceiverShardID(), "tx count", miniBlockHeader.GetTxCount(), @@ -860,7 +870,7 @@ func (sp *shardProcessor) CreateBlock( // placeholder for shardProcessor.CreateBlock script 2 if sp.epochStartTrigger.IsEpochStart() { - log.Debug("CreateBlock", "IsEpochStart", sp.epochStartTrigger.IsEpochStart(), + sp.log.Debug("CreateBlock", "IsEpochStart", sp.epochStartTrigger.IsEpochStart(), "epoch start meta header hash", sp.epochStartTrigger.EpochStartMetaHdrHash()) err = shardHdr.SetEpochStartMetaHash(sp.epochStartTrigger.EpochStartMetaHdrHash()) if err != nil { @@ -869,7 +879,7 @@ func (sp *shardProcessor) CreateBlock( epoch := sp.epochStartTrigger.MetaEpoch() if initialHdr.GetEpoch() != epoch { - log.Debug("shardProcessor.CreateBlock: epoch from header is not the same as epoch from epoch start trigger, overwriting", + sp.log.Debug("shardProcessor.CreateBlock: epoch from header is not the same as epoch from epoch start trigger, overwriting", "epoch from header", initialHdr.GetEpoch(), "epoch from epoch start trigger", epoch) err = shardHdr.SetEpoch(epoch) if err != nil { @@ -891,7 +901,7 @@ func (sp *shardProcessor) CreateBlock( } for _, miniBlock := range finalBody.MiniBlocks { - log.Trace("CreateBlock: miniblock", + sp.log.Trace("CreateBlock: miniblock", "sender shard", miniBlock.SenderShardID, "receiver shard", miniBlock.ReceiverShardID, "type", miniBlock.Type, @@ -906,7 +916,7 @@ func (sp *shardProcessor) CreateBlock( func (sp *shardProcessor) createBlockBody(shardHdr 
data.HeaderHandler, haveTime func() bool) (*block.Body, map[string]*processedMb.ProcessedMiniBlockInfo, error) { sp.blockSizeThrottler.ComputeCurrentMaxSize() - log.Debug("started creating block body", + sp.log.Debug("started creating block body", "epoch", shardHdr.GetEpoch(), "round", shardHdr.GetRound(), "nonce", shardHdr.GetNonce(), @@ -944,7 +954,7 @@ func (sp *shardProcessor) CommitBlock( sp.store.SetEpochForPutOperation(headerHandler.GetEpoch()) - log.Debug("started committing block", + sp.log.Debug("started committing block", "epoch", headerHandler.GetEpoch(), "shard", headerHandler.GetShardID(), "round", headerHandler.GetRound(), @@ -1013,7 +1023,7 @@ func (sp *shardProcessor) CommitBlock( return err } - log.Info("shard block has been committed successfully", + sp.log.Info("shard block has been committed successfully", "epoch", header.GetEpoch(), "shard", header.GetShardID(), "round", header.GetRound(), @@ -1026,17 +1036,17 @@ func (sp *shardProcessor) CommitBlock( errNotCritical := sp.checkSentSignaturesAtCommitTime(headerHandler) if errNotCritical != nil { - log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) + sp.log.Debug("checkSentSignaturesBeforeCommitting", "error", errNotCritical.Error()) } errNotCritical = sp.updateCrossShardInfo(processedMetaHdrs) if errNotCritical != nil { - log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) + sp.log.Debug("updateCrossShardInfo", "error", errNotCritical.Error()) } errNotCritical = sp.forkDetector.AddHeader(header, headerHash, process.BHProcessed, selfNotarizedHeaders, selfNotarizedHeadersHashes) if errNotCritical != nil { - log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) + sp.log.Debug("forkDetector.AddHeader", "error", errNotCritical.Error()) } currentHeader, currentHeaderHash := getLastSelfNotarizedHeaderByItself(sp.blockChain) @@ -1054,7 +1064,7 @@ func (sp *shardProcessor) CommitBlock( sp.updateState(selfNotarizedHeaders, header) 
highestFinalBlockNonce := sp.forkDetector.GetHighestFinalBlockNonce() - log.Debug("highest final shard block", + sp.log.Debug("highest final shard block", "shard", sp.shardCoordinator.SelfId(), "nonce", highestFinalBlockNonce, ) @@ -1132,7 +1142,7 @@ func (sp *shardProcessor) CommitBlock( errNotCritical = sp.removeTxsFromPools(header, body) if errNotCritical != nil { - log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) + sp.log.Debug("removeTxsFromPools", "error", errNotCritical.Error()) } sp.cleanupPools(headerHandler) @@ -1149,7 +1159,7 @@ func (sp *shardProcessor) notifyFinalMetaHdrs(processedMetaHeaders []data.Header for _, metaHeader := range processedMetaHeaders { metaHeaderHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, metaHeader) if err != nil { - log.Debug("shardProcessor.notifyFinalMetaHdrs", "error", err.Error()) + sp.log.Debug("shardProcessor.notifyFinalMetaHdrs", "error", err.Error()) continue } @@ -1166,11 +1176,11 @@ func (sp *shardProcessor) displayPoolsInfo() { headersPool := sp.dataPool.Headers() miniBlocksPool := sp.dataPool.MiniBlocks() - log.Trace("pools info", + sp.log.Trace("pools info", "shard", sp.shardCoordinator.SelfId(), "num headers", headersPool.GetNumHeaders(sp.shardCoordinator.SelfId())) - log.Trace("pools info", + sp.log.Trace("pools info", "shard", core.MetachainShardId, "num headers", headersPool.GetNumHeaders(core.MetachainShardId)) @@ -1178,7 +1188,7 @@ func (sp *shardProcessor) displayPoolsInfo() { // (in this case this number is equal with: self shard + metachain) numShardsToKeepHeaders := 2 capacity := headersPool.MaxSize() * numShardsToKeepHeaders - log.Debug("pools info", + sp.log.Debug("pools info", "total headers", headersPool.Len(), "headers pool capacity", capacity, "total miniblocks", miniBlocksPool.Len(), @@ -1204,7 +1214,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade sp.store, ) if errNotCritical != nil { - log.Debug("could not get shard header from 
storage") + sp.log.Debug("could not get shard header from storage") return } if header.IsStartOfEpochBlock() { @@ -1213,7 +1223,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade headerHash, err := core.CalculateHash(sp.marshalizer, sp.hasher, header) if err != nil { - log.Debug("updateState.CalculateHash", "error", err.Error()) + sp.log.Debug("updateState.CalculateHash", "error", err.Error()) return } @@ -1232,7 +1242,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade prevHeaderRootHashForPruning = prevHeaderAdditionalData.GetScheduledRootHash() } - log.Trace("updateState: prevHeader", + sp.log.Trace("updateState: prevHeader", "shard", prevHeader.GetShardID(), "epoch", prevHeader.GetEpoch(), "round", prevHeader.GetRound(), @@ -1242,7 +1252,7 @@ func (sp *shardProcessor) updateState(headers []data.HeaderHandler, currentHeade "scheduled root hash after processing", scheduledPrevHeaderRootHash, ) - log.Trace("updateState: currHeader", + sp.log.Trace("updateState: currHeader", "shard", header.GetShardID(), "epoch", header.GetEpoch(), "round", header.GetRound(), @@ -1297,18 +1307,18 @@ func (sp *shardProcessor) snapShotEpochStartFromMeta(header data.ShardHeaderHand rootHash := epochStartShData.RootHash schRootHash := epochStartShData.GetScheduledRootHash() if schRootHash != nil { - log.Debug("using scheduled root hash for snapshotting", "schRootHash", schRootHash) + sp.log.Debug("using scheduled root hash for snapshotting", "schRootHash", schRootHash) rootHash = schRootHash } epoch := sp.epochStartTrigger.MetaEpoch() - log.Debug("shard trie snapshot from epoch start shard data", "rootHash", rootHash, "epoch", epoch) + sp.log.Debug("shard trie snapshot from epoch start shard data", "rootHash", rootHash, "epoch", epoch) accounts.SnapshotState(rootHash, epoch) sp.markSnapshotDoneInPeerAccounts() saveEpochStartEconomicsMetrics(sp.appStatusHandler, metaHdr) go func() { err := 
sp.commitTrieEpochRootHashIfNeeded(metaHdr, rootHash) if err != nil { - log.Warn("couldn't commit trie checkpoint", "epoch", header.GetEpoch(), "error", err) + sp.log.Warn("couldn't commit trie checkpoint", "epoch", header.GetEpoch(), "error", err) } }() } @@ -1318,18 +1328,18 @@ func (sp *shardProcessor) snapShotEpochStartFromMeta(header data.ShardHeaderHand func (sp *shardProcessor) markSnapshotDoneInPeerAccounts() { peerAccounts := sp.accountsDB[state.PeerAccountsState] if check.IfNil(peerAccounts) { - log.Warn("programming error: peerAccounts is nil while trying to take a snapshot on a shard node: this can cause OOM exceptions") + sp.log.Warn("programming error: peerAccounts is nil while trying to take a snapshot on a shard node: this can cause OOM exceptions") return } peerAccountsHandler, ok := peerAccounts.(peerAccountsDBHandler) if !ok { - log.Warn("programming error: peerAccounts is not of type peerAccountsDBHandler: this can cause OOM exceptions") + sp.log.Warn("programming error: peerAccounts is not of type peerAccountsDBHandler: this can cause OOM exceptions") return } peerAccountsHandler.MarkSnapshotDone() - log.Debug("shardProcessor.markSnapshotDoneInPeerAccounts completed") + sp.log.Debug("shardProcessor.markSnapshotDoneInPeerAccounts completed") } func (sp *shardProcessor) checkEpochCorrectnessCrossChain() error { @@ -1372,7 +1382,7 @@ func (sp *shardProcessor) checkEpochCorrectnessCrossChain() error { } if shouldRevertChain { - log.Debug("blockchain is wrongly constructed", + sp.log.Debug("blockchain is wrongly constructed", "reverted to nonce", nonce) sp.forkDetector.SetRollBackNonce(nonce) @@ -1390,7 +1400,7 @@ func (sp *shardProcessor) getLastSelfNotarizedHeaderByMetachain() (data.HeaderHa hash := sp.forkDetector.GetHighestFinalBlockHash() header, err := process.GetShardHeader(hash, sp.dataPool.Headers(), sp.marshalizer, sp.store) if err != nil { - log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", 
hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) + sp.log.Warn("getLastSelfNotarizedHeaderByMetachain.GetShardHeader", "error", err.Error(), "hash", hash, "nonce", sp.forkDetector.GetHighestFinalBlockNonce()) return nil, nil } @@ -1415,7 +1425,7 @@ func (sp *shardProcessor) saveLastNotarizedHeader(shardId uint32, processedHdrs } sp.blockTracker.AddCrossNotarizedHeader(shardId, lastCrossNotarizedHeader, lastCrossNotarizedHeaderHash) - DisplayLastNotarized(sp.marshalizer, sp.hasher, lastCrossNotarizedHeader, shardId) + DisplayLastNotarized(sp.log, sp.marshalizer, sp.hasher, lastCrossNotarizedHeader, shardId) return nil } @@ -1523,7 +1533,7 @@ func (sp *shardProcessor) getHighestHdrForShardFromMetachain(shardId uint32, hdr if err != nil { go sp.requestHandler.RequestShardHeader(shardInfo.ShardID, shardInfo.HeaderHash) - log.Debug("requested missing shard header", + sp.log.Debug("requested missing shard header", "hash", shardInfo.HeaderHash, "shard", shardInfo.ShardID, ) @@ -1548,7 +1558,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromHeader(header data.He miniBlockHashes[i] = miniBlockHeaders[i].GetHash() } - log.Trace("cross mini blocks in body", + sp.log.Trace("cross mini blocks in body", "num miniblocks", len(miniBlockHashes), ) @@ -1598,7 +1608,7 @@ func (sp *shardProcessor) addProcessedCrossMiniBlocksFromHeader(headerHandler da miniBlockHeader := process.GetMiniBlockHeaderWithHash(headerHandler, miniBlockHash) if miniBlockHeader == nil { - log.Warn("shardProcessor.addProcessedCrossMiniBlocksFromHeader: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) + sp.log.Warn("shardProcessor.addProcessedCrossMiniBlocksFromHeader: GetMiniBlockHeaderWithHash", "mb hash", miniBlockHash, "error", process.ErrMissingMiniBlockHeader) continue } @@ -1635,7 +1645,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( return nil, process.ErrWrongTypeAssertion } - log.Trace("meta 
header", + sp.log.Trace("meta header", "nonce", metaBlock.Nonce, ) @@ -1655,7 +1665,7 @@ func (sp *shardProcessor) getOrderedProcessedMetaBlocksFromMiniBlockHashes( delete(miniBlockHashes, key) } - log.Trace("cross mini blocks in meta header", + sp.log.Trace("cross mini blocks in meta header", "num miniblocks", len(crossMiniBlockHashes), ) @@ -1696,7 +1706,7 @@ func (sp *shardProcessor) updateCrossShardInfo(processedMetaHdrs []data.HeaderHa // metablock was processed and finalized marshalizedHeader, errMarshal := sp.marshalizer.Marshal(hdr) if errMarshal != nil { - log.Debug("updateCrossShardInfo.Marshal", "error", errMarshal.Error()) + sp.log.Debug("updateCrossShardInfo.Marshal", "error", errMarshal.Error()) continue } @@ -1719,7 +1729,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me return } - log.Trace("received meta block from network", + sp.log.Trace("received meta block from network", "round", metaBlock.Round, "nonce", metaBlock.Nonce, "hash", metaBlockHash, @@ -1747,7 +1757,7 @@ func (sp *shardProcessor) receivedMetaBlock(headerHandler data.HeaderHandler, me hasProofForMetablock = sp.hasProofForMetablock(metaBlockHash, metaBlock) if sp.hdrsForCurrBlock.missingFinalityAttestingHdrs == 0 { - log.Debug("received all missing finality attesting meta headers") + sp.log.Debug("received all missing finality attesting meta headers") } } @@ -1893,18 +1903,18 @@ func (sp *shardProcessor) getAllMiniBlockDstMeFromMeta(header data.ShardHeaderHa // full verification through metachain header func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) (*createAndProcessMiniBlocksDestMeInfo, error) { - log.Debug("createAndProcessMiniBlocksDstMe has been started") + sp.log.Debug("createAndProcessMiniBlocksDstMe has been started") sw := core.NewStopWatch() sw.Start("ComputeLongestMetaChainFromLastNotarized") orderedMetaBlocks, orderedMetaBlocksHashes, err := sp.blockTracker.ComputeLongestMetaChainFromLastNotarized() 
sw.Stop("ComputeLongestMetaChainFromLastNotarized") - log.Debug("measurements", sw.GetMeasurements()...) + sp.log.Debug("measurements", sw.GetMeasurements()...) if err != nil { return nil, err } - log.Debug("metablocks ordered", + sp.log.Debug("metablocks ordered", "num metablocks", len(orderedMetaBlocks), ) @@ -1931,7 +1941,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) sp.hdrsForCurrBlock.mutHdrsForBlock.Lock() for i := 0; i < len(orderedMetaBlocks); i++ { if !createAndProcessInfo.haveTime() && !createAndProcessInfo.haveAdditionalTime() { - log.Debug("time is up after putting cross txs with destination to current shard", + sp.log.Debug("time is up after putting cross txs with destination to current shard", "scheduled mode", createAndProcessInfo.scheduledMode, "num txs added", createAndProcessInfo.numTxsAdded, ) @@ -1939,7 +1949,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) } if createAndProcessInfo.numHdrsAdded >= process.MaxMetaHeadersAllowedInOneShardBlock { - log.Debug("maximum meta headers allowed to be included in one shard block has been reached", + sp.log.Debug("maximum meta headers allowed to be included in one shard block has been reached", "scheduled mode", createAndProcessInfo.scheduledMode, "meta headers added", createAndProcessInfo.numHdrsAdded, ) @@ -1948,7 +1958,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) createAndProcessInfo.currMetaHdr = orderedMetaBlocks[i] if createAndProcessInfo.currMetaHdr.GetNonce() > lastMetaHdr.GetNonce()+1 { - log.Debug("skip searching", + sp.log.Debug("skip searching", "scheduled mode", createAndProcessInfo.scheduledMode, "last meta hdr nonce", lastMetaHdr.GetNonce(), "curr meta hdr nonce", createAndProcessInfo.currMetaHdr.GetNonce()) @@ -1958,7 +1968,7 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) hasProofForHdr := sp.proofsPool.HasProof(core.MetachainShardId, 
orderedMetaBlocksHashes[i]) shouldConsiderProofsForNotarization := sp.enableEpochsHandler.IsFlagEnabledInEpoch(common.EquivalentMessagesFlag, orderedMetaBlocks[i].GetEpoch()) if !hasProofForHdr && shouldConsiderProofsForNotarization { - log.Trace("no proof for meta header", + sp.log.Trace("no proof for meta header", "hash", logger.DisplayByteSlice(orderedMetaBlocksHashes[i]), ) break @@ -1994,14 +2004,14 @@ func (sp *shardProcessor) createAndProcessMiniBlocksDstMe(haveTime func() bool) go sp.requestMetaHeadersIfNeeded(createAndProcessInfo.numHdrsAdded, lastMetaHdr) for _, miniBlock := range createAndProcessInfo.miniBlocks { - log.Debug("mini block info", + sp.log.Debug("mini block info", "type", miniBlock.Type, "sender shard", miniBlock.SenderShardID, "receiver shard", miniBlock.ReceiverShardID, "txs added", len(miniBlock.TxHashes)) } - log.Debug("createAndProcessMiniBlocksDstMe has been finished", + sp.log.Debug("createAndProcessMiniBlocksDstMe has been finished", "num txs added", createAndProcessInfo.numTxsAdded, "num hdrs added", createAndProcessInfo.numHdrsAdded) @@ -2042,7 +2052,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( } if !hdrProcessFinished { - log.Debug("meta block cannot be fully processed", + sp.log.Debug("meta block cannot be fully processed", "scheduled mode", createAndProcessInfo.scheduledMode, "round", createAndProcessInfo.currMetaHdr.GetRound(), "nonce", createAndProcessInfo.currMetaHdr.GetNonce(), @@ -2063,7 +2073,7 @@ func (sp *shardProcessor) createMbsAndProcessCrossShardTransactionsDstMe( } func (sp *shardProcessor) requestMetaHeadersIfNeeded(hdrsAdded uint32, lastMetaHdr data.HeaderHandler) { - log.Debug("meta headers added", + sp.log.Debug("meta headers added", "num", hdrsAdded, "highest nonce", lastMetaHdr.GetNonce(), ) @@ -2095,7 +2105,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by // placeholder for shardProcessor.createMiniBlocks script if 
sp.accountsDB[state.UserAccountsState].JournalLen() != 0 { - log.Error("shardProcessor.createMiniBlocks", + sp.log.Error("shardProcessor.createMiniBlocks", "error", process.ErrAccountStateDirty, "stack", string(sp.accountsDB[state.UserAccountsState].GetStackDebugFirstEntry())) @@ -2104,35 +2114,35 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } if !haveTime() { - log.Debug("shardProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) + sp.log.Debug("shardProcessor.createMiniBlocks", "error", process.ErrTimeIsOut) interMBs := sp.txCoordinator.CreatePostProcessMiniBlocks() if len(interMBs) > 0 { miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime := time.Now() createAndProcessMBsDestMeInfo, err := sp.createAndProcessMiniBlocksDstMe(haveTime) elapsedTime := time.Since(startTime) - log.Debug("elapsed time to create mbs to me", "time", elapsedTime) + sp.log.Debug("elapsed time to create mbs to me", "time", elapsedTime) if err != nil { - log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) + sp.log.Debug("createAndProcessCrossMiniBlocksDstMe", "error", err.Error()) } if createAndProcessMBsDestMeInfo != nil { processedMiniBlocksDestMeInfo = createAndProcessMBsDestMeInfo.allProcessedMiniBlocksInfo if len(createAndProcessMBsDestMeInfo.miniBlocks) > 0 { miniBlocks = append(miniBlocks, createAndProcessMBsDestMeInfo.miniBlocks...) 
- log.Debug("processed miniblocks and txs with destination in self shard", + sp.log.Debug("processed miniblocks and txs with destination in self shard", "num miniblocks", len(createAndProcessMBsDestMeInfo.miniBlocks), "num txs", createAndProcessMBsDestMeInfo.numTxsAdded, "num meta headers", createAndProcessMBsDestMeInfo.numHdrsAdded) @@ -2140,7 +2150,7 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by } if sp.blockTracker.IsShardStuck(core.MetachainShardId) { - log.Warn("shardProcessor.createMiniBlocks", + sp.log.Warn("shardProcessor.createMiniBlocks", "error", process.ErrShardIsStuck, "shard", core.MetachainShardId) @@ -2149,14 +2159,14 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by miniBlocks = append(miniBlocks, interMBs...) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } startTime = time.Now() mbsFromMe := sp.txCoordinator.CreateMbsAndProcessTransactionsFromMe(haveTime, randomness) elapsedTime = time.Since(startTime) - log.Debug("elapsed time to create mbs from me", "time", elapsedTime) + sp.log.Debug("elapsed time to create mbs from me", "time", elapsedTime) if len(mbsFromMe) > 0 { miniBlocks = append(miniBlocks, mbsFromMe...) 
@@ -2166,12 +2176,12 @@ func (sp *shardProcessor) createMiniBlocks(haveTime func() bool, randomness []by numTxs += len(mb.TxHashes) } - log.Debug("processed miniblocks and txs from self shard", + sp.log.Debug("processed miniblocks and txs from self shard", "num miniblocks", len(mbsFromMe), "num txs", numTxs) } - log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) + sp.log.Debug("creating mini blocks has been finished", "num miniblocks", len(miniBlocks)) return &block.Body{MiniBlocks: miniBlocks}, processedMiniBlocksDestMeInfo, nil } @@ -2185,7 +2195,7 @@ func (sp *shardProcessor) applyBodyToHeader( sw.Start("applyBodyToHeader") defer func() { sw.Stop("applyBodyToHeader") - log.Debug("measurements", sw.GetMeasurements()...) + sp.log.Debug("measurements", sw.GetMeasurements()...) }() var err error @@ -2324,7 +2334,7 @@ func (sp *shardProcessor) MarshalizedDataToBroadcast( bodyForShard := block.Body{MiniBlocks: subsetBlockBody} buff, errMarshal := sp.marshalizer.Marshal(&bodyForShard) if errMarshal != nil { - log.Error("shardProcessor.MarshalizedDataToBroadcast.Marshal", "error", errMarshal.Error()) + sp.log.Error("shardProcessor.MarshalizedDataToBroadcast.Marshal", "error", errMarshal.Error()) continue } mrsData[shardId] = buff @@ -2431,7 +2441,7 @@ func (sp *shardProcessor) DecodeBlockHeader(dta []byte) data.HeaderHandler { header, err := process.UnmarshalShardHeader(sp.marshalizer, dta) if err != nil { - log.Debug("DecodeBlockHeader.UnmarshalShardHeader", "error", err.Error()) + sp.log.Debug("DecodeBlockHeader.UnmarshalShardHeader", "error", err.Error()) return nil }