From d68ff7f7b6ef6f1492144af5fca6a5ada34116d1 Mon Sep 17 00:00:00 2001
From: gop
Date: Mon, 19 Aug 2024 16:00:53 -0500
Subject: [PATCH 1/3] Only generate pending header when we pick the head

---
 core/headerchain.go | 63 +++++++++++++--------------------------------
 core/slice.go       | 45 +++++++++++++++++++++++++-------
 2 files changed, 53 insertions(+), 55 deletions(-)

diff --git a/core/headerchain.go b/core/headerchain.go
index 4b49739c34..db4ca87458 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -349,6 +349,7 @@ func (hc *HeaderChain) AppendBlock(block *types.WorkObject) error {
 func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error {
 	hc.headermu.Lock()
 	defer hc.headermu.Unlock()
+	nodeCtx := hc.NodeCtx()
 
 	prevHeader := hc.CurrentHeader()
 	// if trying to set the same header, escape
@@ -367,7 +368,13 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error {
 	// If head is the normal extension of canonical head, we can return by just wiring the canonical hash.
 	if prevHeader.Hash() == head.ParentHash(hc.NodeCtx()) {
 		rawdb.WriteCanonicalHash(hc.headerDb, head.Hash(), head.NumberU64(hc.NodeCtx()))
-		return nil
+		if nodeCtx == common.ZONE_CTX {
+			err := hc.AppendBlock(head)
+			if err != nil {
+				return err
+			}
+			return nil
+		}
 	}
 
 	//Find a common header
@@ -411,6 +418,16 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error {
 	// Run through the hash stack to update canonicalHash and forward state processor
 	for i := len(hashStack) - 1; i >= 0; i-- {
 		rawdb.WriteCanonicalHash(hc.headerDb, hashStack[i].Hash(), hashStack[i].NumberU64(hc.NodeCtx()))
+		if nodeCtx == common.ZONE_CTX {
+			block := hc.GetBlockOrCandidate(hashStack[i].Hash(), hashStack[i].NumberU64(nodeCtx))
+			if block == nil {
+				return errors.New("could not find block during SetCurrentState: " + hashStack[i].Hash().String())
+			}
+			err := hc.AppendBlock(block)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
 	if hc.NodeCtx() == common.ZONE_CTX && hc.ProcessingState() {
@@ -439,50 +456,6 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error {
 	return nil
 }
 
-// SetCurrentState updates the current Quai state and Qi UTXO set upon which the current pending block is built
-func (hc *HeaderChain) SetCurrentState(head *types.WorkObject) error {
-	hc.headermu.Lock()
-	defer hc.headermu.Unlock()
-
-	nodeCtx := hc.NodeCtx()
-	if nodeCtx != common.ZONE_CTX || !hc.ProcessingState() {
-		return nil
-	}
-
-	current := types.CopyWorkObject(head)
-	var headersWithoutState []*types.WorkObject
-	for {
-		headersWithoutState = append(headersWithoutState, current)
-		header := hc.GetHeaderByHash(current.ParentHash(nodeCtx))
-		if header == nil {
-			return ErrSubNotSyncedToDom
-		}
-		if hc.IsGenesisHash(header.Hash()) {
-			break
-		}
-
-		// Check if the state has been processed for this block
-		processedState := rawdb.ReadProcessedState(hc.headerDb, header.Hash())
-		if processedState {
-			break
-		}
-		current = types.CopyWorkObject(header)
-	}
-
-	// Run through the hash stack to update canonicalHash and forward state processor
-	for i := len(headersWithoutState) - 1; i >= 0; i-- {
-		block := hc.GetBlockOrCandidate(headersWithoutState[i].Hash(), headersWithoutState[i].NumberU64(nodeCtx))
-		if block == nil {
-			return errors.New("could not find block during SetCurrentState: " + headersWithoutState[i].Hash().String())
-		}
-		err := hc.AppendBlock(block)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // findCommonAncestor
 func (hc *HeaderChain) findCommonAncestor(header *types.WorkObject) *types.WorkObject {
 	current := types.CopyWorkObject(header)
diff --git a/core/slice.go b/core/slice.go
index e9ff11c6e1..283c145629 100644
--- a/core/slice.go
+++ b/core/slice.go
@@ -353,13 +353,15 @@ func (sl *Slice) Append(header *types.WorkObject, domPendingHeader *types.WorkOb
 
 	setHead = sl.poem(sl.engine.TotalLogS(sl.hc, block), sl.engine.TotalLogS(sl.hc, sl.hc.CurrentHeader()))
 
-	err = sl.hc.SetCurrentState(block)
-	if err != nil {
-		sl.logger.WithFields(log.Fields{
-			"err":  err,
-			"Hash": block.Hash(),
-		}).Error("Error setting current state")
-		return nil, false, err
+	if setHead {
+		err := sl.hc.SetCurrentHeader(block)
+		if err != nil {
+			sl.logger.WithFields(log.Fields{
+				"err":  err,
+				"Hash": block.Hash(),
+			}).Error("Error setting current header")
+			return nil, false, err
+		}
 	}
 
 	// Upate the local pending header
@@ -400,7 +402,8 @@ func (sl *Slice) Append(header *types.WorkObject, domPendingHeader *types.WorkOb
 
 	time12 := common.PrettyDuration(time.Since(start))
 
-	if setHead {
+	// SetCurrentHeader for prime and region is already called above
+	if setHead && nodeCtx != common.ZONE_CTX {
 		sl.hc.SetCurrentHeader(block)
 	} else if !setHead && nodeCtx == common.ZONE_CTX && sl.hc.ProcessingState() {
 		sl.logger.WithFields(log.Fields{
@@ -1115,11 +1118,29 @@ func (sl *Slice) updatePhCacheFromDom(pendingHeader types.PendingHeader, termini
 			}).Info("Choosing phHeader pickPhHead")
 			parent := sl.hc.GetBlockByHash(localPendingHeader.WorkObject().ParentHash(nodeCtx))
 			if parent != nil {
-				sl.hc.SetCurrentHeader(parent)
+				err := sl.hc.SetCurrentHeader(parent)
+				if err != nil {
+					sl.logger.WithField("err", err).Error("cannot set the current header in sub relay pending header")
+					return nil
+				}
+				newPendingHeader, err := sl.generateSlicePendingHeader(parent, localPendingHeader.Termini(), combinedPendingHeader, true, true, false)
+				if err != nil {
+					sl.logger.WithField("err", err).Error("Error generating slice pending header")
+					return err
+				}
+				combinedPendingHeader = types.CopyWorkObject(newPendingHeader.WorkObject())
+				sl.logger.WithFields(log.Fields{
+					"NumberArray": combinedPendingHeader.Header().NumberArray(),
+					"ParentHash":  combinedPendingHeader.ParentHash(nodeCtx),
+					"Terminus":    localPendingHeader.Termini().DomTerminus(nodeLocation),
+				}).Info("Choosing phHeader pickPhHead")
 			} else {
 				sl.logger.WithFields(log.Fields{
 					"hash": localPendingHeader.WorkObject().ParentHash(nodeCtx),
 				}).Warn("Parent not found")
+				// This should not be possible, we cannot update the ph cache if
+				// parent cannot be found
+				return nil
 			}
 			sl.WriteBestPhKey(localPendingHeader.Termini().DomTerminus(nodeLocation))
 		}
@@ -1257,7 +1278,11 @@ func (sl *Slice) init() error {
 			return err
 		}
 		// This is just done for the startup process
-		sl.hc.SetCurrentHeader(genesisHeader)
+		err = sl.hc.SetCurrentHeader(genesisHeader)
+		if err != nil {
+			sl.logger.WithField("err", err).Error("Error setting the current header in slice init")
+			return err
+		}
 
 		if sl.NodeLocation().Context() == common.PRIME_CTX {
 			go sl.NewGenesisPendingHeader(nil, genesisHash, genesisHash)

From 5d07575b180b18bf2d3215777cfb54858dbe5c46 Mon Sep 17 00:00:00 2001
From: gop
Date: Tue, 20 Aug 2024 11:32:58 -0500
Subject: [PATCH 2/3] bugfix: check the distance for each workshare

---
 consensus/blake3pow/consensus.go |  5 ++++
 consensus/blake3pow/poem.go      | 39 +++----------------
 consensus/consensus.go           |  4 +++
 consensus/progpow/consensus.go   |  5 ++++
 consensus/progpow/poem.go        | 39 +++----------------
 core/core.go                     |  6 ++++
 core/headerchain.go              | 51 ++++++++++++++++++++++++++++++++
 core/worker.go                   |  4 +++
 internal/quaiapi/backend.go      |  1 +
 quai/api_backend.go              |  4 +++
 10 files changed, 88 insertions(+), 70 deletions(-)

diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go
index 64f04234ba..1364a2c866 100644
--- a/consensus/blake3pow/consensus.go
+++ b/consensus/blake3pow/consensus.go
@@ -255,6 +255,11 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
 			return err
 		}
 
+		_, err = chain.WorkShareDistance(block, uncle)
+		if err != nil {
+			return err
+		}
+
 		// Verify the block's difficulty based on its timestamp and parent's difficulty
 		// difficulty adjustment can only be checked in zone
 		if nodeCtx == common.ZONE_CTX {
diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go
index 3bd250bfcf..0dea95f5ee 100644
--- a/consensus/blake3pow/poem.go
+++ b/consensus/blake3pow/poem.go
@@ -196,47 +196,16 @@ func (blake3pow *Blake3pow) WorkShareLogS(chain consensus.ChainHeaderReader, wo
 		} else {
 			wsEntropy = new(big.Int).Set(blake3pow.IntrinsicLogS(powHash))
 		}
-		var distance int64 = 0
 		// Discount 2) applies to all shares regardless of the weight
 		// a workshare cannot reference another workshare, it has to be either a block or an uncle
 		// check that the parent hash referenced by the workshare is an uncle or a canonical block
 		// then if its an uncle, traverse back until we hit a canonical block, other wise, use that
 		// as a reference to calculate the distance
-		parent := chain.GetBlockByHash(ws.ParentHash())
-		if parent == nil {
-			return big.NewInt(0), errors.New("error finding the parent of the work share")
-		}
-		// checking if the parent is an uncle
-		canonicalBlockForParentNum := chain.GetHeaderByNumber(parent.NumberU64(common.ZONE_CTX))
-		if canonicalBlockForParentNum == nil {
-			return big.NewInt(0), errors.New("cannot find a canonical block for the parent number")
-		}
-		// If this check passes, the parent block is not a canonical block, we have to trace back
-		if canonicalBlockForParentNum.Hash() != parent.Hash() {
-			var prevBlock *types.WorkObject
-			var uncleDist int64 = 0
-			for {
-				uncleDist++
-				prevBlock = chain.GetBlockByHash(parent.Hash())
-				if prevBlock == nil {
-					return big.NewInt(0), errors.New("cannot find a parent block of an uncle")
-				}
-				blockForPrevBlockNumber := chain.GetHeaderByNumber(prevBlock.NumberU64(common.ZONE_CTX))
-				if blockForPrevBlockNumber == nil {
-					return big.NewInt(0), errors.New("cannot find a canonical block for the uncle block number")
-				}
-				if prevBlock.Hash() == blockForPrevBlockNumber.Hash() {
-					break
-				}
-				if uncleDist > int64(params.WorkSharesInclusionDepth) {
-					return big.NewInt(0), errors.New("uncle referenced by the workshare is more than WorkShareInclusionDepth distance")
-				}
-			}
-			distance = int64(wo.NumberU64(common.ZONE_CTX)-prevBlock.NumberU64(common.ZONE_CTX)) + uncleDist - 1
-		} else {
-			distance = int64(wo.NumberU64(common.ZONE_CTX)-parent.NumberU64(common.ZONE_CTX)) - 1
+		distance, err := chain.WorkShareDistance(wo, ws)
+		if err != nil {
+			return big.NewInt(0), err
 		}
-		wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), big.NewInt(distance), nil))
+		wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), distance, nil))
 		// Add the entropy into the total entropy once the discount calculation is done
 		totalWsEntropy.Add(totalWsEntropy, wsEntropy)
 	}
diff --git a/consensus/consensus.go b/consensus/consensus.go
index 7877eda6b3..1c7aff72fc 100644
--- a/consensus/consensus.go
+++ b/consensus/consensus.go
@@ -61,6 +61,10 @@ type ChainHeaderReader interface {
 
 	// WriteAddressOutpoints writes the address outpoints to the database
 	WriteAddressOutpoints(outpointsMap map[string]map[string]*types.OutpointAndDenomination) error
+
+	// WorkShareDistance calculates the geodesic distance between the
+	// workshare and the workobject in which that workshare is included.
+	WorkShareDistance(wo *types.WorkObject, ws *types.WorkObjectHeader) (*big.Int, error)
 }
 
 // ChainReader defines a small collection of methods needed to access the local
diff --git a/consensus/progpow/consensus.go b/consensus/progpow/consensus.go
index b012e092c8..27b9fb3e83 100644
--- a/consensus/progpow/consensus.go
+++ b/consensus/progpow/consensus.go
@@ -256,6 +256,11 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
 			return err
 		}
 
+		_, err = chain.WorkShareDistance(block, uncle)
+		if err != nil {
+			return err
+		}
+
 		// Verify the block's difficulty based on its timestamp and parent's difficulty
 		// difficulty adjustment can only be checked in zone
 		if nodeCtx == common.ZONE_CTX {
diff --git a/consensus/progpow/poem.go b/consensus/progpow/poem.go
index 202c389053..0b06cca8a9 100644
--- a/consensus/progpow/poem.go
+++ b/consensus/progpow/poem.go
@@ -194,47 +194,16 @@ func (progpow *Progpow) WorkShareLogS(chain consensus.ChainHeaderReader, wo *typ
 		} else {
 			wsEntropy = new(big.Int).Set(progpow.IntrinsicLogS(powHash))
 		}
-		var distance int64 = 0
 		// Discount 2) applies to all shares regardless of the weight
 		// a workshare cannot reference another workshare, it has to be either a block or an uncle
 		// check that the parent hash referenced by the workshare is an uncle or a canonical block
 		// then if its an uncle, traverse back until we hit a canonical block, other wise, use that
 		// as a reference to calculate the distance
-		parent := chain.GetBlockByHash(ws.ParentHash())
-		if parent == nil {
-			return big.NewInt(0), errors.New("error finding the parent of the work share")
-		}
-		// checking if the parent is an uncle
-		canonicalBlockForParentNum := chain.GetHeaderByNumber(parent.NumberU64(common.ZONE_CTX))
-		if canonicalBlockForParentNum == nil {
-			return big.NewInt(0), errors.New("cannot find a canonical block for the parent number")
-		}
-		// If this check passes, the parent block is not a canonical block, we have to trace back
-		if canonicalBlockForParentNum.Hash() != parent.Hash() {
-			var prevBlock *types.WorkObject
-			var uncleDist int64 = 0
-			for {
-				uncleDist++
-				prevBlock = chain.GetBlockByHash(parent.Hash())
-				if prevBlock == nil {
-					return big.NewInt(0), errors.New("cannot find a parent block of an uncle")
-				}
-				blockForPrevBlockNumber := chain.GetHeaderByNumber(prevBlock.NumberU64(common.ZONE_CTX))
-				if blockForPrevBlockNumber == nil {
-					return big.NewInt(0), errors.New("cannot find a canonical block for the uncle block number")
-				}
-				if prevBlock.Hash() == blockForPrevBlockNumber.Hash() {
-					break
-				}
-				if uncleDist > int64(params.WorkSharesInclusionDepth) {
-					return big.NewInt(0), errors.New("uncle referenced by the workshare is more than WorkShareInclusionDepth distance")
-				}
-			}
-			distance = int64(wo.NumberU64(common.ZONE_CTX)-prevBlock.NumberU64(common.ZONE_CTX)) + uncleDist - 1
-		} else {
-			distance = int64(wo.NumberU64(common.ZONE_CTX)-parent.NumberU64(common.ZONE_CTX)) - 1
+		distance, err := chain.WorkShareDistance(wo, ws)
+		if err != nil {
+			return big.NewInt(0), err
 		}
-		wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), big.NewInt(distance), nil))
+		wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), distance, nil))
 		// Add the entropy into the total entropy once the discount calculation is done
 		totalWsEntropy.Add(totalWsEntropy, wsEntropy)
 	}
diff --git a/core/core.go b/core/core.go
index 241590a950..d2c010b5b1 100644
--- a/core/core.go
+++ b/core/core.go
@@ -894,6 +894,12 @@ func (c *Core) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCano
 	return c.sl.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
 }
 
+// WorkShareDistance calculates the geodesic distance between the
+// workshare and the workobject in which that workshare is included.
+func (c *Core) WorkShareDistance(wo *types.WorkObject, ws *types.WorkObjectHeader) (*big.Int, error) {
+	return c.sl.hc.WorkShareDistance(wo, ws)
+}
+
 // Genesis retrieves the chain's genesis block.
 func (c *Core) Genesis() *types.WorkObject {
 	return c.GetBlockByHash(c.sl.hc.genesisHeader.Hash())
diff --git a/core/headerchain.go b/core/headerchain.go
index db4ca87458..aba5582050 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -474,6 +474,57 @@ func (hc *HeaderChain) findCommonAncestor(header *types.WorkObject) *types.WorkO
 	}
 }
 
+func (hc *HeaderChain) WorkShareDistance(wo *types.WorkObject, ws *types.WorkObjectHeader) (*big.Int, error) {
+	current := wo
+	// Create a list of ancestor blocks to the work object
+	ancestors := []*types.WorkObject{}
+	for i := 0; i < params.WorkSharesInclusionDepth; i++ {
+		parent := hc.GetBlockByHash(current.ParentHash(common.ZONE_CTX))
+		if parent == nil {
+			return big.NewInt(0), errors.New("error finding the parent")
+		}
+		ancestors = append(ancestors, parent)
+		current = parent
+	}
+
+	// checks if the wo is in the ancestors list
+	checkInAncestorsList := func(wo *types.WorkObject) bool {
+		for _, wObject := range ancestors {
+			if wo.Hash() == wObject.Hash() {
+				return true
+			}
+		}
+		return false
+	}
+
+	var distance int64 = 0
+	// trace back from the workshare and check if any of the parents exist in
+	// the ancestors list
+	parentHash := ws.ParentHash()
+	for {
+		parent := hc.GetBlockByHash(parentHash)
+		if parent == nil {
+			return big.NewInt(0), errors.New("error finding the parent")
+		}
+		if checkInAncestorsList(parent) {
+			distance += int64(wo.NumberU64(common.ZONE_CTX) - parent.NumberU64(common.ZONE_CTX) - 1)
+			break
+		}
+		distance++
+		// If distance is greater than the WorkSharesInclusionDepth, exit the for loop
+		if distance > int64(params.WorkSharesInclusionDepth) {
+			break
+		}
+		parentHash = parent.ParentHash(common.ZONE_CTX)
+	}
+
+	// If distance is greater than the WorkSharesInclusionDepth, reject the workshare
+	if distance > int64(params.WorkSharesInclusionDepth) {
+		return big.NewInt(0), errors.New("workshare is at distance more than WorkSharesInclusionDepth")
+	}
+
+	return big.NewInt(distance), nil
+}
 func (hc *HeaderChain) AddPendingEtxs(pEtxs types.PendingEtxs) error {
 	if !pEtxs.IsValid(trie.NewStackTrie(nil)) && !hc.IsGenesisHash(pEtxs.Header.Hash()) {
diff --git a/core/worker.go b/core/worker.go
index afc0a9bed7..8ea2cb2798 100644
--- a/core/worker.go
+++ b/core/worker.go
@@ -712,6 +712,10 @@ func (w *worker) commitUncle(env *environment, uncle *types.WorkObjectHeader) er
 	if err != nil {
 		workShare = true
 	}
+	_, err = w.hc.WorkShareDistance(env.wo, uncle)
+	if err != nil {
+		return err
+	}
 	if !workShare && (env.wo.ParentHash(w.hc.NodeCtx()) == uncle.ParentHash()) {
 		return errors.New("uncle is sibling")
 	}
diff --git a/internal/quaiapi/backend.go b/internal/quaiapi/backend.go
index 7b926e20f2..2f9a33d633 100644
--- a/internal/quaiapi/backend.go
+++ b/internal/quaiapi/backend.go
@@ -106,6 +106,7 @@ type Backend interface {
 	SetDomInterface(domInterface core.CoreBackend)
 	BroadcastWorkShare(workShare *types.WorkObjectShareView, location common.Location) error
 	GetMaxTxInWorkShare() uint64
+	WorkShareDistance(wo *types.WorkObject, ws *types.WorkObjectHeader) (*big.Int, error)
 
 	BadHashExistsInChain() bool
 	IsBlockHashABadHash(hash common.Hash) bool
diff --git a/quai/api_backend.go b/quai/api_backend.go
index e95aedf21b..5d09395a64 100644
--- a/quai/api_backend.go
+++ b/quai/api_backend.go
@@ -662,6 +662,10 @@ func (b *QuaiAPIBackend) SanityCheckWorkObjectShareViewBody(wo *types.WorkObject
 	return b.quai.core.SanityCheckWorkObjectShareViewBody(wo)
 }
 
+func (b *QuaiAPIBackend) WorkShareDistance(wo *types.WorkObject, ws *types.WorkObjectHeader) (*big.Int, error) {
+	return b.quai.core.WorkShareDistance(wo, ws)
+}
+
 // ///////////////////////////
 // /////// P2P ///////////////
 // ///////////////////////////

From 43edebebd8dae2aaa35648273de31b1f0899875c Mon Sep 17 00:00:00 2001
From: gop
Date: Tue, 20 Aug 2024 11:40:09 -0500
Subject: [PATCH 3/3] bugfix: reversed poem check in the updatePhCache for
 async case

---
 core/slice.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/slice.go b/core/slice.go
index 283c145629..c9c9f77550 100644
--- a/core/slice.go
+++ b/core/slice.go
@@ -1180,7 +1180,7 @@ func (sl *Slice) updatePhCache(pendingHeaderWithTermini types.PendingHeader, inS
 		if !exists {
 			return
 		}
-		if !sl.poem(bestPh.WorkObject().ParentEntropy(common.ZONE_CTX), pendingHeaderWithTermini.WorkObject().ParentEntropy(common.ZONE_CTX)) {
+		if !sl.poem(pendingHeaderWithTermini.WorkObject().ParentEntropy(common.ZONE_CTX), bestPh.WorkObject().ParentEntropy(common.ZONE_CTX)) {
 			return
 		}
 	}
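The WorkShareDistance helper added in PATCH 2/3 replaces the inline traversal that blake3pow and progpow previously duplicated inside WorkShareLogS. The standalone Go sketch below models that distance rule on a toy in-memory chain, so the two phases are easy to follow: first collect the including block's ancestors up to the inclusion depth, then walk back from the share's parent until one of those ancestors is hit. The toyBlock type, the sample chain, and the inclusionDepth constant are illustrative stand-ins, not go-quai code, and the depth error is raised slightly earlier than in the patch.

package main

import (
	"errors"
	"fmt"
)

// inclusionDepth stands in for params.WorkSharesInclusionDepth.
const inclusionDepth = 7

type toyBlock struct {
	hash   string
	parent string
	number uint64
}

// workShareDistance models the patched logic: gather the inclusionDepth
// ancestors of the including block wo, then walk back from the share's
// parent until one of those ancestors is hit, counting the hops.
func workShareDistance(chain map[string]*toyBlock, wo *toyBlock, shareParent string) (int64, error) {
	ancestors := map[string]bool{}
	current := wo
	for i := 0; i < inclusionDepth; i++ {
		parent, ok := chain[current.parent]
		if !ok {
			return 0, errors.New("error finding the parent")
		}
		ancestors[parent.hash] = true
		current = parent
	}

	var distance int64
	hash := shareParent
	for {
		parent, ok := chain[hash]
		if !ok {
			return 0, errors.New("error finding the parent")
		}
		if ancestors[parent.hash] {
			// Landed on an ancestor of the including block: add the remaining height gap.
			distance += int64(wo.number - parent.number - 1)
			break
		}
		distance++
		if distance > inclusionDepth {
			return 0, errors.New("workshare is at distance more than the inclusion depth")
		}
		hash = parent.parent
	}
	return distance, nil
}

func main() {
	// Canonical chain b1 <- b2 <- ... <- b8, plus an uncle u3 built on b2.
	chain := map[string]*toyBlock{}
	prev := ""
	for i := uint64(1); i <= 8; i++ {
		h := fmt.Sprintf("b%d", i)
		chain[h] = &toyBlock{hash: h, parent: prev, number: i}
		prev = h
	}
	chain["u3"] = &toyBlock{hash: "u3", parent: "b2", number: 3}

	wo := chain["b8"]
	fmt.Println(workShareDistance(chain, wo, "b7")) // share mined on the parent: 0 <nil>
	fmt.Println(workShareDistance(chain, wo, "u3")) // share mined on an old uncle: 6 <nil>
}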
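In both poem.go changes the distance now feeds the existing entropy discount, wsEntropy / 2^distance, with WorkShareDistance returning a *big.Int instead of the old int64. A minimal sketch of that arithmetic, using made-up sample values rather than real chain entropies:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	wsEntropy := big.NewInt(1 << 20) // assumed intrinsic entropy of the share
	distance := big.NewInt(3)        // share is three blocks behind the including work object

	// wsEntropy = wsEntropy / 2^distance, as in the updated WorkShareLogS
	discounted := new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), distance, nil))
	fmt.Println(discounted) // 1048576 / 8 = 131072
}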
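PATCH 3/3 flips the argument order of the poem check in updatePhCache so it matches the setHead computation in Append, where the candidate's entropy is passed first and the current head's second. The sketch below shows the intended guard under that assumption; poem here is only a simplified stand-in for Slice.poem, which is more involved in the real code.

package main

import (
	"fmt"
	"math/big"
)

// poem is a stand-in: report whether the proposed entropy beats the current one.
func poem(proposedS, currentS *big.Int) bool {
	return proposedS.Cmp(currentS) > 0
}

func main() {
	incoming := big.NewInt(120) // parent entropy of the pending header being cached
	best := big.NewInt(100)     // parent entropy of the cached best pending header

	// Patched check: bail out unless the incoming pending header wins.
	if !poem(incoming, best) {
		fmt.Println("keep the cached best pending header")
		return
	}
	fmt.Println("replace the cached best with the incoming pending header")
}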