Skip to content

Commit

Permalink
Workshares are included and are able to be mined
Browse files Browse the repository at this point in the history
Workshares are sub-shares found in the process of producing a block.
Including these sub-share samples in the block improves the statistical
convergence. So in this PR, workshares have been added to the
uncles list. The weight of the workshares is added to the parent
entropy and also to the parent sub delta S, but a discount based on the
reference depth and frequency of workshares is applied while adding the
entropy of each workshare.
  • Loading branch information
gameofpointers committed Apr 29, 2024
1 parent beaa3c5 commit fa314c6
Show file tree
Hide file tree
Showing 22 changed files with 356 additions and 63 deletions.
1 change: 1 addition & 0 deletions cmd/utils/hierarchical_coordinator.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@ func (hc *HierarchicalCoordinator) startNode(logPath string, quaiBackend quai.Co
hc.p2p.Subscribe(location, &types.WorkObject{})
hc.p2p.Subscribe(location, common.Hash{})
hc.p2p.Subscribe(location, &types.Transaction{})
hc.p2p.Subscribe(location, &types.WorkObjectHeader{})

StartNode(stack)

Expand Down
33 changes: 25 additions & 8 deletions consensus/blake3pow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (

// Blake3pow proof-of-work protocol constants.
var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks

ContextTimeFactor = big10
Expand Down Expand Up @@ -83,7 +82,7 @@ func (blake3pow *Blake3pow) VerifyHeader(chain consensus.ChainHeaderReader, head
if chain.GetHeader(header.Hash(), number) != nil {
return nil
}
parent := chain.GetHeader(header.ParentHash(nodeCtx), number-1)
parent := chain.GetBlockByHash(header.ParentHash(nodeCtx))
if parent == nil {
return consensus.ErrUnknownAncestor
}
Expand Down Expand Up @@ -177,7 +176,7 @@ func (blake3pow *Blake3pow) verifyHeaderWorker(chain consensus.ChainHeaderReader
nodeCtx := blake3pow.config.NodeLocation.Context()
var parent *types.WorkObject
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash(nodeCtx), headers[0].NumberU64(nodeCtx)-1)
parent = chain.GetBlockByHash(headers[0].ParentHash(nodeCtx))
} else if headers[index-1].Hash() == headers[index].ParentHash(nodeCtx) {
parent = headers[index-1]
}
Expand All @@ -195,8 +194,8 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
if blake3pow.config.PowMode == ModeFullFake {
return nil
}
// Verify that there are at most 2 uncles included in this block
if len(block.Uncles()) > maxUncles {
// Verify that there are at most params.MaxWorkShareCount uncles included in this block
if len(block.Uncles()) > params.MaxWorkShareCount {
return errTooManyUncles
}
if len(block.Uncles()) == 0 {
Expand All @@ -206,7 +205,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject)

number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx)
for i := 0; i < 7; i++ {
for i := 0; i < params.WorkSharesInclusionDepth; i++ {
ancestorHeader := chain.GetHeader(parent, number)
if ancestorHeader == nil {
break
Expand Down Expand Up @@ -244,8 +243,9 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) {
return errDanglingUncle
}
// Verify the seal and get the powHash for the given header
err := blake3pow.verifySeal(uncle)

// make sure that the work can be computed
_, err := blake3pow.ComputePowHash(uncle)
if err != nil {
return err
}
Expand All @@ -258,6 +258,19 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}

// Verify that the work share number is parent's +1
parentNumber := parent.Number(nodeCtx)
if chain.IsGenesisHash(parent.Hash()) {
parentNumber = big.NewInt(0)
}
if diff := new(big.Int).Sub(uncle.Number(), parentNumber); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}

if !blake3pow.CheckIfValidWorkShare(uncle) {
return errors.New("invalid workshare included")
}
}
}
return nil
Expand Down Expand Up @@ -627,3 +640,7 @@ func (blake3pow *Blake3pow) NodeLocation() common.Location {
// ComputePowLight is not supported by the blake3 engine: blake3 has no
// separate light-verification dataset, so there is no "light" pow hash to
// compute. Calling this on a blake3pow engine is a programming error and
// panics unconditionally.
func (blake3pow *Blake3pow) ComputePowLight(header *types.WorkObjectHeader) (common.Hash, common.Hash) {
	panic("compute pow light doesnt exist for blake3")
}

// ComputePowHash returns the proof-of-work hash for the given header. For
// blake3 the sealed header hash itself serves as the pow hash, so no extra
// mixing step is needed and the computation cannot fail.
func (blake3pow *Blake3pow) ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error) {
	powHash := header.Hash()
	return powHash, nil
}
63 changes: 63 additions & 0 deletions consensus/blake3pow/poem.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,13 @@ func (blake3pow *Blake3pow) TotalLogS(chain consensus.GenesisReader, header *typ
if err != nil {
return big.NewInt(0)
}
if blake3pow.NodeLocation().Context() == common.ZONE_CTX {
workShareS, err := blake3pow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
}
switch order {
case common.PRIME_CTX:
totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX))
Expand Down Expand Up @@ -119,6 +126,13 @@ func (blake3pow *Blake3pow) DeltaLogS(chain consensus.GenesisReader, header *typ
if err != nil {
return big.NewInt(0)
}
if blake3pow.NodeLocation().Context() == common.ZONE_CTX {
workShareS, err := blake3pow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
}
switch order {
case common.PRIME_CTX:
return big.NewInt(0)
Expand Down Expand Up @@ -149,6 +163,41 @@ func (blake3pow *Blake3pow) UncledLogS(block *types.WorkObject) *big.Int {
return totalUncledLogS
}

// WorkShareLogS returns the total log-entropy contribution of the workshares
// referenced by the given work object (carried in its uncles list). Each
// workshare's intrinsic entropy is discounted before accumulation:
//  1. by how far the share's pow hash falls short of the full work-object
//     target (cheaper shares are statistically more numerous), and
//  2. by the staleness of its inclusion — a factor of 2 per block of delay.
func (blake3pow *Blake3pow) WorkShareLogS(wo *types.WorkObject) (*big.Int, error) {
	workShares := wo.Uncles()
	totalWsEntropy := big.NewInt(0)
	for _, ws := range workShares {
		powHash, err := blake3pow.ComputePowHash(ws)
		if err != nil {
			return big.NewInt(0), err
		}
		// compute the diff from the pow hash so that the work can be discounted
		// NOTE(review): an all-zero powHash would make the divisor zero and
		// panic in big.Int.Div — presumably unreachable; confirm upstream
		// validation guarantees a nonzero hash.
		powDiff := new(big.Int).Div(common.Big2e256, new(big.Int).SetBytes(powHash.Bytes()))
		// Two discounts need to be applied to the weight of each work share
		// 1) Discount based on the amount of number of other possible work
		// shares for the same entropy value
		// 2) Discount based on the staleness of inclusion, for every block
		// delay the weight gets reduced by the factor of 2

		// Discount 1) only applies if the workshare has less weight than the
		// work object threshold
		wsEntropy := new(big.Int).Set(blake3pow.IntrinsicLogS(powHash))
		woDiff := new(big.Int).Set(wo.Difficulty())
		target := new(big.Int).Div(common.Big2e256, woDiff)
		if new(big.Int).SetBytes(powHash.Bytes()).Cmp(target) > 0 { // powHash > target
			// Halve the entropy once for every bit of work the share falls
			// short of the full block threshold.
			c, _ := mathutil.BinaryLog(powDiff, mantBits)
			thresoldC, _ := mathutil.BinaryLog(woDiff, mantBits)
			wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(thresoldC-c)), nil))
		}
		// Discount 2) applies to all shares regardless of the weight
		// NOTE(review): assumes ws.NumberU64() <= wo.NumberU64(ZONE_CTX);
		// otherwise the uint64 subtraction underflows into a huge exponent.
		// Verify this is enforced by uncle number validation before inclusion.
		wsEntropy = new(big.Int).Div(wsEntropy, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(wo.NumberU64(common.ZONE_CTX)-ws.NumberU64())), nil))

		// Add the entropy into the total entropy once the discount calculation is done
		totalWsEntropy.Add(totalWsEntropy, wsEntropy)
	}
	return totalWsEntropy, nil
}

func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int {
// Treating the genesis block differently
if chain.IsGenesisHash(header.Hash()) {
Expand Down Expand Up @@ -203,3 +252,17 @@ func (blake3pow *Blake3pow) CalcRank(chain consensus.GenesisReader, header *type

return 0, nil
}

// CheckIfValidWorkShare reports whether the given header qualifies as a
// workshare: its pow hash must be at or below the relaxed target obtained by
// lowering the header's difficulty by params.WorkSharesThresholdDiff bits.
// It returns false if the pow hash cannot be computed.
func (blake3pow *Blake3pow) CheckIfValidWorkShare(workShare *types.WorkObjectHeader) bool {
	powHash, err := blake3pow.ComputePowHash(workShare)
	if err != nil {
		return false
	}
	// Derive the workshare target: 2^256 / 2^(log2(diff) - WorkSharesThresholdDiff).
	difficulty := new(big.Int).Set(workShare.Difficulty())
	bits, _ := mathutil.BinaryLog(difficulty, mantBits)
	shareExponent := big.NewInt(int64(bits - params.WorkSharesThresholdDiff))
	shareDifficulty := new(big.Int).Exp(big.NewInt(2), shareExponent, nil)
	shareTarget := new(big.Int).Div(big2e256, shareDifficulty)
	hashValue := new(big.Int).SetBytes(powHash.Bytes())
	return hashValue.Cmp(shareTarget) <= 0
}
8 changes: 7 additions & 1 deletion consensus/blake3pow/sealer.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ import (

"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
"github.com/dominant-strategies/go-quai/params"
"modernc.org/mathutil"
)

const (
Expand Down Expand Up @@ -127,8 +129,12 @@ func (blake3pow *Blake3pow) Seal(header *types.WorkObject, results chan<- *types
// seed that results in correct final header difficulty.
func (blake3pow *Blake3pow) mine(header *types.WorkObject, id int, seed uint64, abort chan struct{}, found chan *types.WorkObject) {
// Extract some data from the header
diff := new(big.Int).Set(header.Difficulty())
c, _ := mathutil.BinaryLog(diff, mantBits)
workShareThreshold := c - params.WorkSharesThresholdDiff
workShareDiff := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(workShareThreshold)), nil)
var (
target = new(big.Int).Div(big2e256, header.Difficulty())
target = new(big.Int).Div(big2e256, workShareDiff)
)
// Start generating random nonces until we abort or find a good one
var (
Expand Down
13 changes: 13 additions & 0 deletions consensus/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,9 @@ type ChainHeaderReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.WorkObject

// GetBlockByHash retrieves a block from the database by hash.
GetBlockByHash(hash common.Hash) *types.WorkObject

// GetTerminiByHash retrieves the termini for a given header hash
GetTerminiByHash(hash common.Hash) *types.Termini

Expand Down Expand Up @@ -99,6 +102,13 @@ type Engine interface {
// UncledLogS returns the log of the entropy reduction by uncles referenced in the block
UncledLogS(block *types.WorkObject) *big.Int

// WorkShareLogS returns the log of the entropy reduction by the workshare referenced in the block
WorkShareLogS(block *types.WorkObject) (*big.Int, error)

// CheckIfValidWorkShare checks if the workshare meets the work share
// requirements defined by the protocol
CheckIfValidWorkShare(workShare *types.WorkObjectHeader) bool

// UncledSubDeltaLogS returns the log of the uncled entropy reduction since the past coincident
UncledSubDeltaLogS(chain GenesisReader, header *types.WorkObject) *big.Int

Expand Down Expand Up @@ -151,6 +161,9 @@ type Engine interface {
// that a new block should have.
CalcDifficulty(chain ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int

// ComputePowHash returns the pow hash of the workobject header
ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error)

// IsDomCoincident returns true if this block satisfies the difficulty order
// of a dominant chain. If this node does not have a dominant chain (i.e.
// if this is a prime node), then the function will always return false.
Expand Down
38 changes: 32 additions & 6 deletions consensus/progpow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (

// Progpow proof-of-work protocol constants.
var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks

ContextTimeFactor = big10
Expand Down Expand Up @@ -197,8 +196,8 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
if progpow.config.PowMode == ModeFullFake {
return nil
}
// Verify that there are at most 2 uncles included in this block
if len(block.Uncles()) > maxUncles {
// Verify that there are at most params.MaxWorkShareCount uncles included in this block
if len(block.Uncles()) > params.MaxWorkShareCount {
return errTooManyUncles
}
if len(block.Uncles()) == 0 {
Expand All @@ -208,7 +207,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject)

number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx)
for i := 0; i < 7; i++ {
for i := 0; i < params.WorkSharesInclusionDepth; i++ {
ancestorHeader := chain.GetHeader(parent, number)
if ancestorHeader == nil {
break
Expand Down Expand Up @@ -246,8 +245,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) {
return errDanglingUncle
}
// Verify the seal and get the powHash for the given header
_, err := progpow.verifySeal(uncle)
_, err := progpow.ComputePowHash(uncle)
if err != nil {
return err
}
Expand All @@ -260,6 +258,20 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}

// Verify that the work share number is parent's +1
parentNumber := parent.Number(nodeCtx)
if chain.IsGenesisHash(parent.Hash()) {
parentNumber = big.NewInt(0)
}
if diff := new(big.Int).Sub(uncle.Number(), parentNumber); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}

if !progpow.CheckIfValidWorkShare(uncle) {
return errors.New("invalid workshare included")
}

}
}
return nil
Expand Down Expand Up @@ -590,6 +602,20 @@ func (progpow *Progpow) verifySeal(header *types.WorkObjectHeader) (common.Hash,
return powHash.(common.Hash), nil
}

// ComputePowHash returns the progpow proof-of-work hash for the given header.
// It reuses the digest and pow hash cached on the header when available, and
// otherwise recomputes both via ComputePowLight. The mix hash embedded in the
// header is verified against the computed digest; a mismatch returns
// errInvalidMixHash.
func (progpow *Progpow) ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error) {
	// Check progpow
	mixHash := header.PowDigest.Load()
	powHash := header.PowHash.Load()
	if powHash == nil || mixHash == nil {
		// Cache miss: recompute the digest and pow hash from scratch.
		mixHash, powHash = progpow.ComputePowLight(header)
	}
	// Verify the calculated values against the ones provided in the header
	if !bytes.Equal(header.MixHash().Bytes(), mixHash.(common.Hash).Bytes()) {
		return common.Hash{}, errInvalidMixHash
	}
	return powHash.(common.Hash), nil
}

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the progpow protocol. The changes are done inline.
func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error {
Expand Down
Loading

0 comments on commit fa314c6

Please sign in to comment.