Skip to content

Commit

Permalink
Workshares are included in blocks and can be mined
Browse files Browse the repository at this point in the history
workshares are sub shares found in the process of producing a block.
Including these sub share samples in the block makes the statistical
convergence better. So in this PR, workshares have been added into the
uncles list. The weight of the workshares is added into the parent
entropy and also parent sub delta S. But a discount based on the
reference depth and frequency of workshares is applied while adding the
entropy of the workshare.
  • Loading branch information
gameofpointers committed Apr 26, 2024
1 parent beaa3c5 commit 1e93816
Show file tree
Hide file tree
Showing 22 changed files with 307 additions and 58 deletions.
1 change: 1 addition & 0 deletions cmd/utils/hierarchical_coordinator.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@ func (hc *HierarchicalCoordinator) startNode(logPath string, quaiBackend quai.Co
hc.p2p.Subscribe(location, &types.WorkObject{})
hc.p2p.Subscribe(location, common.Hash{})
hc.p2p.Subscribe(location, &types.Transaction{})
hc.p2p.Subscribe(location, &types.WorkObjectHeader{})

StartNode(stack)

Expand Down
23 changes: 18 additions & 5 deletions consensus/blake3pow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (

// Blake3pow proof-of-work protocol constants.
var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks

ContextTimeFactor = big10
Expand Down Expand Up @@ -196,7 +195,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
return nil
}
// Verify that no more than params.MaxUncleCount uncles are included in this block
if len(block.Uncles()) > maxUncles {
if len(block.Uncles()) > params.MaxUncleCount {
return errTooManyUncles
}
if len(block.Uncles()) == 0 {
Expand All @@ -206,7 +205,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject)

number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx)
for i := 0; i < 7; i++ {
for i := 0; i < params.WorkSharesInclusionDepth; i++ {
ancestorHeader := chain.GetHeader(parent, number)
if ancestorHeader == nil {
break
Expand Down Expand Up @@ -244,8 +243,9 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) {
return errDanglingUncle
}
// Verify the seal and get the powHash for the given header
err := blake3pow.verifySeal(uncle)

// make sure that the work can be computed
_, err := blake3pow.ComputePowHash(uncle)
if err != nil {
return err
}
Expand All @@ -258,6 +258,15 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}

// Verify that the block number is parent's +1
parentNumber := parent.Number(nodeCtx)
if chain.IsGenesisHash(parent.Hash()) {
parentNumber = big.NewInt(0)
}
if diff := new(big.Int).Sub(uncle.Number(), parentNumber); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}
}
}
return nil
Expand Down Expand Up @@ -627,3 +636,7 @@ func (blake3pow *Blake3pow) NodeLocation() common.Location {
// ComputePowLight implements consensus.Engine. Blake3 has no separate
// "light" verification path (the blake3 hash is already cheap to compute),
// so this method must never be called on the blake3pow engine.
func (blake3pow *Blake3pow) ComputePowLight(header *types.WorkObjectHeader) (common.Hash, common.Hash) {
panic("compute pow light doesnt exist for blake3")
}

// ComputePowHash returns the proof-of-work hash of the given workobject
// header. For blake3pow the sealed header hash is itself the pow hash, so
// no extra computation is required and no error can occur.
func (blake3pow *Blake3pow) ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error) {
return header.Hash(), nil
}
47 changes: 47 additions & 0 deletions consensus/blake3pow/poem.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,11 @@ func (blake3pow *Blake3pow) TotalLogS(chain consensus.GenesisReader, header *typ
if err != nil {
return big.NewInt(0)
}
workShareS, err := blake3pow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
switch order {
case common.PRIME_CTX:
totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX))
Expand Down Expand Up @@ -119,6 +124,11 @@ func (blake3pow *Blake3pow) DeltaLogS(chain consensus.GenesisReader, header *typ
if err != nil {
return big.NewInt(0)
}
workShareS, err := blake3pow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
switch order {
case common.PRIME_CTX:
return big.NewInt(0)
Expand Down Expand Up @@ -149,6 +159,43 @@ func (blake3pow *Blake3pow) UncledLogS(block *types.WorkObject) *big.Int {
return totalUncledLogS
}

// WorkShareLogS returns the total entropy (log S) contributed by the
// workshares carried in wo's uncle list. Each share's raw weight is
// derived from its pow hash, discounted (see below), converted back to a
// target, and folded into the running total via IntrinsicLogS.
func (blake3pow *Blake3pow) WorkShareLogS(wo *types.WorkObject) (*big.Int, error) {
workShares := wo.Uncles()
totalWsEntropy := big.NewInt(0)
for _, ws := range workShares {
powHash, err := blake3pow.ComputePowHash(ws)
if err != nil {
return big.NewInt(0), err
}
// compute the diff from the pow hash so that the work can be discounted
powDiff := new(big.Int).Div(common.Big2e256, new(big.Int).SetBytes(powHash.Bytes()))
// Two discounts need to be applied to the weight of each work share
// 1) Discount based on the number of other possible work shares at
// the same entropy value
// 2) Discount based on the staleness of inclusion, for every block
// delay the weight gets reduced by the factor of 2

// Discount 1) only applies if the workshare has less weight than the
// work object threshold
wsDiff := new(big.Int).Set(powDiff)
woDiff := new(big.Int).Set(wo.Difficulty())
target := new(big.Int).Div(common.Big2e256, woDiff)
if new(big.Int).SetBytes(powHash.Bytes()).Cmp(target) > 0 { // powHash > target
// Share fell short of the full block target: halve its weight once
// per bit of difficulty it is below the block threshold.
c, _ := mathutil.BinaryLog(powDiff, mantBits)
thresoldC, _ := mathutil.BinaryLog(woDiff, mantBits)
wsDiff = new(big.Int).Div(wsDiff, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(thresoldC-c)), nil))
}
// Discount 2) applies to all shares regardless of the weight
// NOTE(review): the comment above promises a factor-of-2 reduction per
// block of delay, but this line divides linearly by the delay — confirm
// which behavior is intended.
// NOTE(review): if ws.NumberU64() equals wo.NumberU64(ZONE_CTX) the
// divisor is zero and big.Int.Div panics; presumably uncle validation
// guarantees every included workshare is strictly older — verify.
wsDiff = new(big.Int).Div(wsDiff, big.NewInt(int64(wo.NumberU64(common.ZONE_CTX)-ws.NumberU64())))

wsTarget := new(big.Int).Div(common.Big2e256, wsDiff)
wsEntropy := blake3pow.IntrinsicLogS(common.BytesToHash(wsTarget.Bytes()))
// Add the entropy into the total entropy once the discount calculation is done
totalWsEntropy.Add(totalWsEntropy, wsEntropy)
}
return totalWsEntropy, nil
}

func (blake3pow *Blake3pow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int {
// Treating the genesis block differently
if chain.IsGenesisHash(header.Hash()) {
Expand Down
8 changes: 7 additions & 1 deletion consensus/blake3pow/sealer.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ import (

"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
"github.com/dominant-strategies/go-quai/params"
"modernc.org/mathutil"
)

const (
Expand Down Expand Up @@ -127,8 +129,12 @@ func (blake3pow *Blake3pow) Seal(header *types.WorkObject, results chan<- *types
// seed that results in correct final header difficulty.
func (blake3pow *Blake3pow) mine(header *types.WorkObject, id int, seed uint64, abort chan struct{}, found chan *types.WorkObject) {
// Extract some data from the header
diff := new(big.Int).Set(header.Difficulty())
c, _ := mathutil.BinaryLog(diff, mantBits)
workShareThreshold := c - params.WorkSharesThresholdDiff
workShareDiff := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(workShareThreshold)), nil)
var (
target = new(big.Int).Div(big2e256, header.Difficulty())
target = new(big.Int).Div(big2e256, workShareDiff)
)
// Start generating random nonces until we abort or find a good one
var (
Expand Down
6 changes: 6 additions & 0 deletions consensus/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,9 @@ type Engine interface {
// UncledLogS returns the log of the entropy reduction by uncles referenced in the block
UncledLogS(block *types.WorkObject) *big.Int

// WorkShareLogS returns the log of the entropy reduction by the workshare referenced in the block
WorkShareLogS(block *types.WorkObject) (*big.Int, error)

// UncledUncledSubDeltaLogS returns the log of the uncled entropy reduction since the past coincident
UncledSubDeltaLogS(chain GenesisReader, header *types.WorkObject) *big.Int

Expand Down Expand Up @@ -151,6 +154,9 @@ type Engine interface {
// that a new block should have.
CalcDifficulty(chain ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int

// ComputePowHash returns the pow hash of the workobject header
ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error)

// IsDomCoincident returns true if this block satisfies the difficulty order
// of a dominant chain. If this node does not have a dominant chain (i.e.
// if this is a prime node), then the function will always return false.
Expand Down
31 changes: 26 additions & 5 deletions consensus/progpow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@ import (

// Progpow proof-of-work protocol constants.
var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks

ContextTimeFactor = big10
Expand Down Expand Up @@ -198,7 +197,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
return nil
}
// Verify that no more than params.MaxUncleCount uncles are included in this block
if len(block.Uncles()) > maxUncles {
if len(block.Uncles()) > params.MaxUncleCount {
return errTooManyUncles
}
if len(block.Uncles()) == 0 {
Expand All @@ -208,7 +207,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.WorkObject)

number, parent := block.NumberU64(nodeCtx)-1, block.ParentHash(nodeCtx)
for i := 0; i < 7; i++ {
for i := 0; i < params.WorkSharesInclusionDepth; i++ {
ancestorHeader := chain.GetHeader(parent, number)
if ancestorHeader == nil {
break
Expand Down Expand Up @@ -246,8 +245,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash(nodeCtx) {
return errDanglingUncle
}
// Verify the seal and get the powHash for the given header
_, err := progpow.verifySeal(uncle)
_, err := progpow.ComputePowHash(uncle)
if err != nil {
return err
}
Expand All @@ -260,6 +258,15 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}

// Verify that the block number is parent's +1
parentNumber := parent.Number(nodeCtx)
if chain.IsGenesisHash(parent.Hash()) {
parentNumber = big.NewInt(0)
}
if diff := new(big.Int).Sub(uncle.Number(), parentNumber); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}
}
}
return nil
Expand Down Expand Up @@ -590,6 +597,20 @@ func (progpow *Progpow) verifySeal(header *types.WorkObjectHeader) (common.Hash,
return powHash.(common.Hash), nil
}

// ComputePowHash returns the progpow hash of the workobject header. It
// reuses the mix/pow hashes cached on the header when present and
// recomputes them via ComputePowLight otherwise, then validates the mix
// hash against the one claimed in the header before returning.
func (progpow *Progpow) ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error) {
// Check progpow
mixHash := header.PowDigest.Load()
powHash := header.PowHash.Load()
if powHash == nil || mixHash == nil {
// NOTE(review): recomputed values are not stored back into the
// header's caches here — confirm callers do not depend on that.
mixHash, powHash = progpow.ComputePowLight(header)
}
// Verify the calculated values against the ones provided in the header
if !bytes.Equal(header.MixHash().Bytes(), mixHash.(common.Hash).Bytes()) {
return common.Hash{}, errInvalidMixHash
}
return powHash.(common.Hash), nil
}

// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the progpow protocol. The changes are done inline.
func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error {
Expand Down
50 changes: 50 additions & 0 deletions consensus/progpow/poem.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,11 @@ func (progpow *Progpow) TotalLogS(chain consensus.GenesisReader, header *types.W
if err != nil {
return big.NewInt(0)
}
workShareS, err := progpow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
switch order {
case common.PRIME_CTX:
totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX))
Expand Down Expand Up @@ -111,10 +116,18 @@ func (progpow *Progpow) TotalLogPhS(header *types.WorkObject) *big.Int {
}

func (progpow *Progpow) DeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int {
if chain.IsGenesisHash(header.Hash()) {
return big.NewInt(0)
}
intrinsicS, order, err := progpow.CalcOrder(header)
if err != nil {
return big.NewInt(0)
}
workShareS, err := progpow.WorkShareLogS(header)
if err != nil {
return big.NewInt(0)
}
intrinsicS = new(big.Int).Add(intrinsicS, workShareS)
switch order {
case common.PRIME_CTX:
return big.NewInt(0)
Expand Down Expand Up @@ -145,6 +158,43 @@ func (progpow *Progpow) UncledLogS(block *types.WorkObject) *big.Int {
return totalUncledLogS
}

// WorkShareLogS returns the total entropy (log S) contributed by the
// workshares carried in wo's uncle list. Each share's raw weight is
// derived from its pow hash, discounted (see below), converted back to a
// target, and folded into the running total via IntrinsicLogS.
func (progpow *Progpow) WorkShareLogS(wo *types.WorkObject) (*big.Int, error) {
workShares := wo.Uncles()
totalWsEntropy := big.NewInt(0)
for _, ws := range workShares {
powHash, err := progpow.ComputePowHash(ws)
if err != nil {
return big.NewInt(0), err
}
// compute the diff from the pow hash so that the work can be discounted
powDiff := new(big.Int).Div(common.Big2e256, new(big.Int).SetBytes(powHash.Bytes()))
// Two discounts need to be applied to the weight of each work share
// 1) Discount based on the number of other possible work shares at
// the same entropy value
// 2) Discount based on the staleness of inclusion, for every block
// delay the weight gets reduced by the factor of 2

// Discount 1) only applies if the workshare has less weight than the
// work object threshold
wsDiff := new(big.Int).Set(powDiff)
woDiff := new(big.Int).Set(wo.Difficulty())
target := new(big.Int).Div(common.Big2e256, woDiff)
if new(big.Int).SetBytes(powHash.Bytes()).Cmp(target) > 0 { // powHash > target
// Share fell short of the full block target: halve its weight once
// per bit of difficulty it is below the block threshold.
c, _ := mathutil.BinaryLog(powDiff, mantBits)
thresoldC, _ := mathutil.BinaryLog(woDiff, mantBits)
wsDiff = new(big.Int).Div(wsDiff, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(thresoldC-c)), nil))
}
// Discount 2) applies to all shares regardless of the weight
// NOTE(review): the comment above promises a factor-of-2 reduction per
// block of delay, but this line divides linearly by the delay — confirm
// which behavior is intended.
// NOTE(review): if ws.NumberU64() equals wo.NumberU64(ZONE_CTX) the
// divisor is zero and big.Int.Div panics; presumably uncle validation
// guarantees every included workshare is strictly older — verify.
wsDiff = new(big.Int).Div(wsDiff, big.NewInt(int64(wo.NumberU64(common.ZONE_CTX)-ws.NumberU64())))

wsTarget := new(big.Int).Div(common.Big2e256, wsDiff)
wsEntropy := progpow.IntrinsicLogS(common.BytesToHash(wsTarget.Bytes()))
// Add the entropy into the total entropy once the discount calculation is done
totalWsEntropy.Add(totalWsEntropy, wsEntropy)
}
return totalWsEntropy, nil
}

func (progpow *Progpow) UncledSubDeltaLogS(chain consensus.GenesisReader, header *types.WorkObject) *big.Int {
// Treating the genesis block differently
if chain.IsGenesisHash(header.Hash()) {
Expand Down
8 changes: 7 additions & 1 deletion consensus/progpow/sealer.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ import (
"github.com/dominant-strategies/go-quai/common"
"github.com/dominant-strategies/go-quai/core/types"
"github.com/dominant-strategies/go-quai/log"
"github.com/dominant-strategies/go-quai/params"
"modernc.org/mathutil"
)

const (
Expand Down Expand Up @@ -128,8 +130,12 @@ func (progpow *Progpow) Seal(header *types.WorkObject, results chan<- *types.Wor
// seed that results in correct final block difficulty.
func (progpow *Progpow) mine(header *types.WorkObject, id int, seed uint64, abort chan struct{}, found chan *types.WorkObject) {
// Extract some data from the header
diff := new(big.Int).Set(header.Difficulty())
c, _ := mathutil.BinaryLog(diff, mantBits)
workShareThreshold := c - params.WorkSharesThresholdDiff
workShareDiff := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(workShareThreshold)), nil)
var (
target = new(big.Int).Div(big2e256, header.Difficulty())
target = new(big.Int).Div(big2e256, workShareDiff)
nodeCtx = progpow.config.NodeLocation.Context()
)
// Start generating random nonces until we abort or find a good one
Expand Down
4 changes: 4 additions & 0 deletions core/core.go
Original file line number Diff line number Diff line change
Expand Up @@ -1037,6 +1037,10 @@ func (c *Core) SubscribePendingHeader(ch chan<- *types.WorkObject) event.Subscri

func (c *Core) IsMining() bool { return c.sl.miner.Mining() }

// SendWorkShare forwards a received workshare header to the miner's
// worker, which decides whether to include it in a future block's
// uncle list.
func (c *Core) SendWorkShare(workShare *types.WorkObjectHeader) error {
return c.sl.miner.worker.AddWorkShare(workShare)
}

//-------------------------//
// State Processor methods //
//-------------------------//
Expand Down
3 changes: 1 addition & 2 deletions core/events.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,7 @@ type ChainEvent struct {
}

type ChainSideEvent struct {
Blocks []*types.WorkObject
ResetUncles bool
Blocks []*types.WorkObject
}

type ChainHeadEvent struct {
Expand Down
2 changes: 1 addition & 1 deletion core/headerchain.go
Original file line number Diff line number Diff line change
Expand Up @@ -422,7 +422,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.WorkObject) error {
blocks = append(blocks, block)
}
}
hc.chainSideFeed.Send(ChainSideEvent{Blocks: blocks, ResetUncles: true})
hc.chainSideFeed.Send(ChainSideEvent{Blocks: blocks})
}()
}

Expand Down
2 changes: 1 addition & 1 deletion core/slice.go
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,7 @@ func (sl *Slice) Append(header *types.WorkObject, domPendingHeader *types.WorkOb
"location": block.Location(),
"parentHash": block.ParentHash(nodeCtx),
}).Debug("Found uncle")
sl.hc.chainSideFeed.Send(ChainSideEvent{Blocks: []*types.WorkObject{block}, ResetUncles: false})
sl.hc.chainSideFeed.Send(ChainSideEvent{Blocks: []*types.WorkObject{block}})
}

// Chain head feed is only used by the Zone chains
Expand Down
Loading

0 comments on commit 1e93816

Please sign in to comment.