Skip to content

Commit

Permalink
Cleaned up the Prints for Testnet
Browse files Browse the repository at this point in the history
  • Loading branch information
gameofpointers committed Sep 17, 2023
1 parent 37534c2 commit 7ce4117
Show file tree
Hide file tree
Showing 9 changed files with 47 additions and 28 deletions.
2 changes: 1 addition & 1 deletion core/block_validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
if etxHash := types.DeriveSha(emittedEtxs, trie.NewStackTrie(nil)); etxHash != header.EtxHash() {
return fmt.Errorf("invalid etx hash (remote: %x local: %x)", header.EtxHash(), etxHash)
}
log.Info("times during validate state:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6)
log.Debug("times during validate state:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6)
return nil
}

Expand Down
2 changes: 1 addition & 1 deletion core/bodydb.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ func (bc *BodyDb) Append(block *types.Block, newInboundEtxs types.Transactions)
}
rawdb.WriteTxLookupEntriesByBlock(batch, block)
}
log.Info("Time taken to", "apply state:", common.PrettyDuration(time.Since(stateApply)))
log.Debug("Time taken to", "apply state:", common.PrettyDuration(time.Since(stateApply)))

rawdb.WriteBlock(batch, block)
if err = batch.Write(); err != nil {
Expand Down
2 changes: 1 addition & 1 deletion core/headerchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,7 @@ func (hc *HeaderChain) AppendBlock(block *types.Block, newInboundEtxs types.Tran
if err != nil {
return err
}
log.Info("Time taken to", "Append in bc", common.PrettyDuration(time.Since(blockappend)))
log.Debug("Time taken to", "Append in bc", common.PrettyDuration(time.Since(blockappend)))

hc.bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
if len(logs) > 0 {
Expand Down
25 changes: 12 additions & 13 deletions core/slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do
}
// Don't append the block which already exists in the database.
if sl.hc.HasHeader(header.Hash(), header.NumberU64()) && (sl.hc.GetTerminiByHash(header.Hash()) != nil) {
log.Warn("Block has already been appended: ", "Hash: ", header.Hash())
log.Debug("Block has already been appended: ", "Hash: ", header.Hash())
return nil, false, ErrKnownBlock
}
time1 := common.PrettyDuration(time.Since(start))
Expand All @@ -170,7 +170,7 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do
if err != nil {
return nil, false, err
}
log.Info("PCRC done", "hash", header.Hash(), "number", header.NumberArray(), "termini", newTermini)
log.Debug("PCRC done", "hash", header.Hash(), "number", header.NumberArray(), "termini", newTermini)

time2 := common.PrettyDuration(time.Since(start))
// Append the new block
Expand Down Expand Up @@ -319,8 +319,8 @@ func (sl *Slice) Append(header *types.Header, domPendingHeader *types.Header, do
sl.relayPh(block, pendingHeaderWithTermini, domOrigin, block.Location(), subReorg)

time10 := common.PrettyDuration(time.Since(start))
log.Info("times during append:", "t0_1", time0_1, "t0_2", time0_2, "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10)
log.Info("times during sub append:", "t6_1:", time6_1, "t6_2:", time6_2, "t6_3:", time6_3)
log.Info("Times during append:", "t0_1", time0_1, "t0_2", time0_2, "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10)
log.Debug("Times during sub append:", "t6_1:", time6_1, "t6_2:", time6_2, "t6_3:", time6_3)
log.Info("Appended new block", "number", block.Header().Number(), "hash", block.Hash(),
"difficulty", block.Header().Difficulty(),
"uncles", len(block.Uncles()), "txs", len(block.Transactions()), "etxs", len(block.ExtTransactions()), "gas", block.GasUsed(), "gasLimit", block.GasLimit(),
Expand Down Expand Up @@ -395,32 +395,31 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH
return
}
oldDomTerminus := oldDomTermini.DomTerminus()
log.Info("Updating Dom...", "oldDomTerminus:", oldDomTerminus, "newDomTerminus:", newDomTerminus, "location:", location)
// Find the dom TerminusHash with the newTerminus
newPh, newDomTerminiExists := sl.readPhCache(newDomTerminus)
if !newDomTerminiExists {
log.Info("Update Dom:", "newTerminus does not exist:", newDomTerminus)
log.Warn("Update Dom:", "newTerminus does not exist:", newDomTerminus)
return
}
log.Info("UpdateDom:", "newDomTerminus:", newDomTerminus, "oldDomTerminus:", oldDomTerminus, "newDomTermini:", pendingHeader.Termini().DomTermini())
log.Debug("UpdateDom:", "NewDomTerminus:", newDomTerminus, "OldDomTerminus:", oldDomTerminus, "NewDomTermini:", pendingHeader.Termini().DomTermini(), "Location")
if nodeCtx == common.REGION_CTX && oldDomTerminus == newPh.Termini().DomTerminus() {
// Can update
sl.bestPhKey = newDomTerminus
newPh, exists := sl.readPhCache(newDomTerminus)
if exists {
for _, i := range sl.randomRelayArray() {
if sl.subClients[i] != nil {
log.Info("newPh:", "parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i))
log.Info("SubRelay in UpdateDom", "parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i))
sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx)
}
}
} else {
log.Info("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
}
return
} else {
// need to update dom
log.Info("Append need to updateDom", "oldDomTermini:", oldDomTerminus, "newDomTermini:", newPh.Termini(), "location:", common.NodeLocation)
log.Info("UpdateDom needs to updateDom", "oldDomTermini:", oldDomTerminus, "newDomTermini:", newPh.Termini(), "location:", location)
if sl.domClient != nil {
go sl.domClient.UpdateDom(context.Background(), oldDomTerminus, types.NewPendingHeader(pendingHeader.Header(), newPh.Termini()), location)
} else {
Expand All @@ -430,12 +429,12 @@ func (sl *Slice) UpdateDom(oldTerminus common.Hash, pendingHeader types.PendingH
if exists {
for _, i := range sl.randomRelayArray() {
if sl.subClients[i] != nil {
log.Info("newPh:", "parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "newTermini:", newPh.Termini().SubTerminiAtIndex(i))
log.Info("SubRelay in UpdateDom:", "Parent Hash:", newPh.Header().ParentHash(), "Number", newPh.Header().NumberArray(), "NewTermini:", newPh.Termini().SubTerminiAtIndex(i))
sl.subClients[i].SubRelayPendingHeader(context.Background(), newPh, pendingHeader.Header().ParentEntropy(common.ZONE_CTX), common.Location{}, true, nodeCtx)
}
}
} else {
log.Info("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
log.Warn("Update Dom:", "phCache at newTerminus does not exist:", newDomTerminus)
}
return
}
Expand Down Expand Up @@ -775,7 +774,7 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini
bestPh, exists := sl.readPhCache(sl.bestPhKey)
nodeCtx := common.NodeLocation.Context()
if nodeCtx == common.ZONE_CTX && exists && sl.bestPhKey != localPendingHeader.Termini().DomTerminus() && !sl.poem(newEntropy, bestPh.Header().ParentEntropy()) {
log.Info("subrelay rejected", "local dom terminus", localPendingHeader.Termini().DomTerminus(), "Number", combinedPendingHeader.NumberArray(), "best ph key", sl.bestPhKey, "number", bestPh.Header().NumberArray(), "newentropy", newEntropy)
log.Warn("Subrelay Rejected", "local dom terminus", localPendingHeader.Termini().DomTerminus(), "Number", combinedPendingHeader.NumberArray(), "best ph key", sl.bestPhKey, "number", bestPh.Header().NumberArray(), "newentropy", newEntropy)
sl.updatePhCache(types.NewPendingHeader(combinedPendingHeader, localTermini), false, nil, sl.poem(newEntropy, localPendingHeader.Header().ParentEntropy()), location)
go sl.domClient.UpdateDom(context.Background(), localPendingHeader.Termini().DomTerminus(), bestPh, common.NodeLocation)
return nil
Expand Down
8 changes: 4 additions & 4 deletions core/state_processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -311,10 +311,10 @@ func (p *StateProcessor) Process(block *types.Block, etxSet types.EtxSet) (types
p.engine.Finalize(p.hc, header, statedb, block.Transactions(), block.Uncles())
time5 := common.PrettyDuration(time.Since(start))

log.Info("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx))
log.Info("Time taken in Process", "time1", time1, "time2", time2, "time3", time3, "time4", time4, "time5", time5)
log.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx))
log.Debug("Time taken in Process", "time1", time1, "time2", time2, "time3", time3, "time4", time4, "time5", time5)

log.Info("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "senders cache time", common.PrettyDuration(timeSenders), "percent cached internal txs", fmt.Sprintf("%.2f", float64(len(senders))/float64(numInternalTxs)*100), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx))
log.Debug("Total Tx Processing Time", "signing time", common.PrettyDuration(timeSign), "senders cache time", common.PrettyDuration(timeSenders), "percent cached internal txs", fmt.Sprintf("%.2f", float64(len(senders))/float64(numInternalTxs)*100), "prepare state time", common.PrettyDuration(timePrepare), "etx time", common.PrettyDuration(timeEtx), "tx time", common.PrettyDuration(timeTx))

return receipts, allLogs, statedb, *usedGas, nil
}
Expand Down Expand Up @@ -484,7 +484,7 @@ func (p *StateProcessor) Apply(batch ethdb.Batch, block *types.Block, newInbound
rawdb.WriteEtxSet(p.hc.bc.db, block.Hash(), block.NumberU64(), etxSet)
time12 := common.PrettyDuration(time.Since(start))

log.Info("times during state processor apply:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t4.5:", time4_5, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10, "t11:", time11, "t12:", time12)
log.Debug("times during state processor apply:", "t1:", time1, "t2:", time2, "t3:", time3, "t4:", time4, "t4.5:", time4_5, "t5:", time5, "t6:", time6, "t7:", time7, "t8:", time8, "t9:", time9, "t10:", time10, "t11:", time11, "t12:", time12)
return logs, nil
}

Expand Down
14 changes: 7 additions & 7 deletions core/tx_pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block
}
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
log.Info("Setting new local account", "address", addr)
log.Debug("Setting new local account", "address", addr)
pool.locals.add(addr)
}
pool.priced = newTxPricedList(pool.all)
Expand Down Expand Up @@ -1305,7 +1305,7 @@ func (pool *TxPool) runReorg(done chan struct{}, cancel chan struct{}, reset *tx
pool.txFeed.Send(NewTxsEvent{txs})
}
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to runReorg in txpool", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to runReorg in txpool", "time", common.PrettyDuration(time.Since(start)))
pool.reOrgCounter = 0
}
return
Expand Down Expand Up @@ -1401,7 +1401,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) {
senderCacher.recover(pool.signer, reinject)
pool.addTxsLocked(reinject, false)
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to resetTxPool", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to resetTxPool", "time", common.PrettyDuration(time.Since(start)))
}
}

Expand Down Expand Up @@ -1473,7 +1473,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.InternalAddress) []*typ
}
}
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to promoteExecutables", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to promoteExecutables", "time", common.PrettyDuration(time.Since(start)))
}
return promoted
}
Expand Down Expand Up @@ -1568,7 +1568,7 @@ func (pool *TxPool) truncatePending() {
}
pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to truncatePending", "time", common.PrettyDuration(time.Since(start)))
}
}

Expand Down Expand Up @@ -1620,7 +1620,7 @@ func (pool *TxPool) truncateQueue() {
}
}
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to truncateQueue", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to truncateQueue", "time", common.PrettyDuration(time.Since(start)))
}
}

Expand Down Expand Up @@ -1685,7 +1685,7 @@ func (pool *TxPool) demoteUnexecutables() {
}
}
if pool.reOrgCounter == c_reorgCounterThreshold {
log.Info("Time taken to demoteExecutables", "time", common.PrettyDuration(time.Since(start)))
log.Debug("Time taken to demoteExecutables", "time", common.PrettyDuration(time.Since(start)))
}
}

Expand Down
7 changes: 7 additions & 0 deletions core/types/block.go
Original file line number Diff line number Diff line change
Expand Up @@ -1051,6 +1051,13 @@ type Termini struct {
subTermini []common.Hash `json:"subTermini"`
}

// String implements fmt.Stringer for Termini so that termini values render
// legibly in structured log output.
//
// It formats the dom and sub termini slices directly rather than calling
// DomTerminiAtIndex/SubTerminiAtIndex with hard-coded indices 0..2: those
// accessors index into the underlying slices, so the hard-coded form panics
// with an index-out-of-range if a Termini ever carries fewer than three
// entries. Printing the slices is safe for any length and keeps every entry
// visible even if the count grows.
func (t Termini) String() string {
	return fmt.Sprintf("{DomTermini: %v, SubTermini: %v}", t.domTermini, t.subTermini)
}

func CopyTermini(termini Termini) Termini {
newTermini := EmptyTermini()
for i, t := range termini.domTermini {
Expand Down
9 changes: 9 additions & 0 deletions eth/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import (
"github.com/dominant-strategies/go-quai/event"
"github.com/dominant-strategies/go-quai/log"
"github.com/dominant-strategies/go-quai/p2p"
lru "github.com/hashicorp/golang-lru"
)

const (
Expand Down Expand Up @@ -69,6 +70,9 @@ const (

// minPeerSendTx is the minimum number of peers that will receive a new transaction.
minPeerSendTx = 2

// c_broadcastCacheSize is the Max number of broadcast block hashes to be kept for Logging
c_broadcastCacheSize = 10
)

// txPool defines the methods needed from a transaction pool implementation to
Expand Down Expand Up @@ -150,6 +154,8 @@ type handler struct {
chainSync *chainSyncer
wg sync.WaitGroup
peerWG sync.WaitGroup

broadcastCache *lru.Cache
}

// newHandler returns a handler for all Quai chain management protocol.
Expand All @@ -173,6 +179,9 @@ func newHandler(config *handlerConfig) (*handler, error) {
quitSync: make(chan struct{}),
}

broadcastCache, _ := lru.New(c_broadcastCacheSize)
h.broadcastCache = broadcastCache

h.downloader = downloader.New(h.eventMux, h.core, h.removePeer)

// Construct the fetcher (short sync)
Expand Down
6 changes: 5 additions & 1 deletion eth/handler_eth.go
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,11 @@ func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block) er
// Schedule the block for import
h.blockFetcher.Enqueue(peer.ID(), block)

log.Info("Received Block Broadcast", "Hash", block.Hash(), "Number", block.Header().NumberArray())
if block != nil && !h.broadcastCache.Contains(block.Hash()) {
log.Info("Received Block Broadcast", "Hash", block.Hash(), "Number", block.Header().NumberArray())
h.broadcastCache.Add(block.Hash(), true)
}

blockS := h.core.TotalLogS(block.Header())
_, _, peerEntropy, _ := peer.Head()
if blockS != nil && peerEntropy != nil {
Expand Down

0 comments on commit 7ce4117

Please sign in to comment.