Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

QIP8: First block difficulty should be based on current expansion #1711

Merged
merged 3 commits into from
May 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 8 additions & 4 deletions common/big.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,11 +33,11 @@ var (
Big256 = big.NewInt(256)
Big257 = big.NewInt(257)
Big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
Big2e64 = new(big.Int).Exp(big.NewInt(2), big.NewInt(64), big.NewInt(0))
)

// BigBitsToBits converts an entropy value expressed in "big bits"
// (bits scaled up by 2^64 for fixed-point precision) into plain bits
// by dividing by 2^64. The input is not modified; a new big.Int is
// returned. Uses the shared package constant Big2e64 rather than
// recomputing 2^64 on every call.
func BigBitsToBits(original *big.Int) *big.Int {
	return big.NewInt(0).Div(original, Big2e64)
}

func BitsToBigBits(original *big.Int) *big.Int {
Expand All @@ -48,11 +48,15 @@ func BitsToBigBits(original *big.Int) *big.Int {
}

// BigBitsArrayToBitsArray converts a slice of "big bits" entropy values
// into plain bits, dividing each element by 2^64 (see BigBitsToBits).
// The input slice and its elements are left unmodified; a newly
// allocated slice of newly allocated big.Ints is returned. Uses the
// shared package constant Big2e64 instead of recomputing 2^64 per call.
func BigBitsArrayToBitsArray(original []*big.Int) []*big.Int {
	bitsArray := make([]*big.Int, len(original))
	for i, bits := range original {
		bitsArray[i] = big.NewInt(0).Div(bits, Big2e64)
	}

	return bitsArray
}

// EntropyBigBitsToDifficultyBits converts an entropy value measured in
// "big bits" (bits scaled by 2^64) into its difficulty representation:
// 2^256 / 2^(bigBits / 2^64). The argument is not modified.
func EntropyBigBitsToDifficultyBits(bigBits *big.Int) *big.Int {
	exponent := new(big.Int).Div(bigBits, Big2e64)
	powerOfTwo := new(big.Int).Exp(big.NewInt(2), exponent, nil)
	return new(big.Int).Div(Big2e256, powerOfTwo)
}
9 changes: 6 additions & 3 deletions consensus/blake3pow/blake3pow.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ type Config struct {
NotifyFull bool

Log *log.Logger `toml:"-"`
// Number of threads to mine on if mining
NumThreads int
}

// Blake3pow is a proof-of-work consensus engine using the blake3 hash algorithm
Expand Down Expand Up @@ -70,9 +72,10 @@ type Blake3pow struct {
// packages.
func New(config Config, notify []string, noverify bool, logger *log.Logger) *Blake3pow {
blake3pow := &Blake3pow{
config: config,
update: make(chan struct{}),
logger: logger,
config: config,
update: make(chan struct{}),
logger: logger,
threads: config.NumThreads,
}
if config.PowMode == ModeShared {
blake3pow.shared = sharedBlake3pow
Expand Down
25 changes: 20 additions & 5 deletions consensus/blake3pow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *typ
// difficulty adjustment can only be checked in zone
if nodeCtx == common.ZONE_CTX {
parent := chain.GetHeaderByHash(uncle.ParentHash())
expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader())
expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader(), block.ExpansionNumber())
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}
Expand Down Expand Up @@ -300,7 +300,7 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head
// Verify the block's difficulty based on its timestamp and parent's difficulty
// difficulty adjustment can only be checked in zone
if nodeCtx == common.ZONE_CTX {
expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader())
expected := blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader(), header.ExpansionNumber())
if expected.Cmp(header.Difficulty()) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected)
}
Expand Down Expand Up @@ -484,7 +484,7 @@ func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, head
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int {
func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader, expansionNum uint8) *big.Int {
nodeCtx := blake3pow.config.NodeLocation.Context()

if nodeCtx != common.ZONE_CTX {
Expand All @@ -499,7 +499,22 @@ func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, pa

if chain.IsGenesisHash(parent.Hash()) {
// Divide the parent difficulty by the number of slices running at the time of expansion
return parent.Difficulty()
if expansionNum == 0 && parent.Location().Equal(common.Location{}) {
// Base case: expansion number is 0 and the parent is the actual genesis block
return parent.Difficulty()
}
genesis := chain.GetHeaderByHash(parent.Hash())
genesisTotalLogS := blake3pow.TotalLogS(chain, genesis)
if genesisTotalLogS.Cmp(genesis.ParentEntropy(common.PRIME_CTX)) < 0 { // prevent negative difficulty
blake3pow.logger.Errorf("Genesis block has invalid parent entropy: %v", genesis.ParentEntropy(common.PRIME_CTX))
return nil
}
differenceParentEntropy := new(big.Int).Sub(genesisTotalLogS, genesis.ParentEntropy(common.PRIME_CTX))
numRegionsInPrime, numZonesInRegion := common.GetHierarchySizeForExpansionNumber(expansionNum)
timeFactorMultiplied := new(big.Int).Mul(params.TimeFactor, params.TimeFactor)
numBlocks := new(big.Int).Mul(timeFactorMultiplied, new(big.Int).SetUint64(numRegionsInPrime*numZonesInRegion))
differenceParentEntropy.Div(differenceParentEntropy, numBlocks)
return common.EntropyBigBitsToDifficultyBits(differenceParentEntropy)
}

parentOfParent := chain.GetHeaderByHash(parent.ParentHash())
Expand Down Expand Up @@ -570,7 +585,7 @@ func (blake3pow *Blake3pow) verifySeal(header *types.WorkObjectHeader) error {
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the blake3pow protocol. The changes are done inline.
func (blake3pow *Blake3pow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error {
header.WorkObjectHeader().SetDifficulty(blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader()))
header.WorkObjectHeader().SetDifficulty(blake3pow.CalcDifficulty(chain, parent.WorkObjectHeader(), header.ExpansionNumber()))
return nil
}

Expand Down
11 changes: 6 additions & 5 deletions consensus/blake3pow/poem.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ func (blake3pow *Blake3pow) CalcOrder(header *types.WorkObject) (*big.Int, int,
if header.NumberU64(nodeCtx) == 0 {
return big0, common.PRIME_CTX, nil
}
expansionNum := header.ExpansionNumber()

// Verify the seal and get the powHash for the given header
err := blake3pow.verifySeal(header.WorkObjectHeader())
Expand All @@ -35,20 +36,20 @@ func (blake3pow *Blake3pow) CalcOrder(header *types.WorkObject) (*big.Int, int,
// the given header determines the prime block
totalDeltaSPrime := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX))
totalDeltaSPrime = new(big.Int).Add(totalDeltaSPrime, intrinsicS)
primeDeltaSTarget := new(big.Int).Div(params.PrimeEntropyTarget, big2)
primeDeltaSTarget := new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2)
primeDeltaSTarget = new(big.Int).Mul(zoneThresholdS, primeDeltaSTarget)

primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.PrimeEntropyTarget))
primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.PrimeEntropyTarget(expansionNum)))
if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaSPrime.Cmp(primeDeltaSTarget) > 0 {
return intrinsicS, common.PRIME_CTX, nil
}

// REGION
// Compute the total accumulated entropy since the last region block
totalDeltaSRegion := new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS)
regionDeltaSTarget := new(big.Int).Div(params.RegionEntropyTarget, big2)
regionDeltaSTarget := new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2)
regionDeltaSTarget = new(big.Int).Mul(zoneThresholdS, regionDeltaSTarget)
regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.RegionEntropyTarget))
regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.RegionEntropyTarget(expansionNum)))
if intrinsicS.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaSRegion.Cmp(regionDeltaSTarget) > 0 {
return intrinsicS, common.REGION_CTX, nil
}
Expand Down Expand Up @@ -256,7 +257,7 @@ func (blake3pow *Blake3pow) CalcRank(chain consensus.GenesisReader, header *type
for i := common.InterlinkDepth; i > 0; i-- {
extraBits := math.Pow(2, float64(i))
primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(big.NewInt(int64(extraBits))))
primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget))
primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget(header.ExpansionNumber())))
if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 {
return i, nil
}
Expand Down
7 changes: 4 additions & 3 deletions consensus/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -159,7 +159,7 @@ type Engine interface {

// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
CalcDifficulty(chain ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int
CalcDifficulty(chain ChainHeaderReader, parent *types.WorkObjectHeader, expansionNum uint8) *big.Int

// ComputePowHash returns the pow hash of the workobject header
ComputePowHash(header *types.WorkObjectHeader) (common.Hash, error)
Expand All @@ -175,11 +175,12 @@ type Engine interface {
// VerifySeal computes the PowHash and checks if work meets the difficulty
// requirement specified in header
VerifySeal(header *types.WorkObjectHeader) (common.Hash, error)

SetThreads(threads int)
}

// TargetToDifficulty converts a proof-of-work target back into a
// difficulty value: difficulty = 2^256 / target. Uses the shared
// common.Big2e256 constant instead of recomputing 2^256 on every call.
func TargetToDifficulty(target *big.Int) *big.Int {
	return new(big.Int).Div(common.Big2e256, target)
}

func DifficultyToTarget(difficulty *big.Int) *big.Int {
Expand Down
27 changes: 21 additions & 6 deletions consensus/progpow/consensus.go
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ func (progpow *Progpow) VerifyUncles(chain consensus.ChainReader, block *types.W
// difficulty adjustment can only be checked in zone
if nodeCtx == common.ZONE_CTX {
parent := chain.GetHeaderByHash(uncle.ParentHash())
expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader())
expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader(), block.ExpansionNumber())
if expected.Cmp(uncle.Difficulty()) != 0 {
return fmt.Errorf("uncle has invalid difficulty: have %v, want %v", uncle.Difficulty(), expected)
}
Expand Down Expand Up @@ -301,7 +301,7 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header,
// Verify the block's difficulty based on its timestamp and parent's difficulty
// difficulty adjustment can only be checked in zone
if nodeCtx == common.ZONE_CTX {
expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader())
expected := progpow.CalcDifficulty(chain, parent.WorkObjectHeader(), header.ExpansionNumber())
if expected.Cmp(header.Difficulty()) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected)
}
Expand Down Expand Up @@ -482,7 +482,7 @@ func (progpow *Progpow) verifyHeader(chain consensus.ChainHeaderReader, header,
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader) *big.Int {
func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.WorkObjectHeader, expansionNum uint8) *big.Int {
nodeCtx := progpow.NodeLocation().Context()

if nodeCtx != common.ZONE_CTX {
Expand All @@ -496,8 +496,23 @@ func (progpow *Progpow) CalcDifficulty(chain consensus.ChainHeaderReader, parent
///// Difficulty = Max(parent.Difficulty() + e * k, MinimumDifficulty)

if chain.IsGenesisHash(parent.Hash()) {
// Genesis Difficulty is the difficulty in the Genesis Block divided by the number of total slices active
return parent.Difficulty()
// Divide the parent difficulty by the number of slices running at the time of expansion
if expansionNum == 0 && parent.Location().Equal(common.Location{}) {
// Base case: expansion number is 0 and the parent is the actual genesis block
return parent.Difficulty()
}
genesis := chain.GetHeaderByHash(parent.Hash())
genesisTotalLogS := progpow.TotalLogS(chain, genesis)
if genesisTotalLogS.Cmp(genesis.ParentEntropy(common.PRIME_CTX)) < 0 { // prevent negative difficulty
progpow.logger.Errorf("Genesis block has invalid parent entropy: %v", genesis.ParentEntropy(common.PRIME_CTX))
return nil
}
differenceParentEntropy := new(big.Int).Sub(genesisTotalLogS, genesis.ParentEntropy(common.PRIME_CTX))
numRegionsInPrime, numZonesInRegion := common.GetHierarchySizeForExpansionNumber(expansionNum)
timeFactorMultiplied := new(big.Int).Mul(params.TimeFactor, params.TimeFactor)
numBlocks := new(big.Int).Mul(timeFactorMultiplied, new(big.Int).SetUint64(numRegionsInPrime*numZonesInRegion))
differenceParentEntropy.Div(differenceParentEntropy, numBlocks)
return common.EntropyBigBitsToDifficultyBits(differenceParentEntropy)
}
parentOfParent := chain.GetHeaderByHash(parent.ParentHash())
if parentOfParent == nil || chain.IsGenesisHash(parentOfParent.Hash()) {
Expand Down Expand Up @@ -619,7 +634,7 @@ func (progpow *Progpow) ComputePowHash(header *types.WorkObjectHeader) (common.H
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the progpow protocol. The changes are done inline.
func (progpow *Progpow) Prepare(chain consensus.ChainHeaderReader, header *types.WorkObject, parent *types.WorkObject) error {
header.WorkObjectHeader().SetDifficulty(progpow.CalcDifficulty(chain, parent.WorkObjectHeader()))
header.WorkObjectHeader().SetDifficulty(progpow.CalcDifficulty(chain, parent.WorkObjectHeader(), header.ExpansionNumber()))
return nil
}

Expand Down
11 changes: 6 additions & 5 deletions consensus/progpow/poem.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ func (progpow *Progpow) CalcOrder(header *types.WorkObject) (*big.Int, int, erro
if header.NumberU64(nodeCtx) == 0 {
return big0, common.PRIME_CTX, nil
}
expansionNum := header.ExpansionNumber()

// Verify the seal and get the powHash for the given header
powHash, err := progpow.verifySeal(header.WorkObjectHeader())
Expand All @@ -36,20 +37,20 @@ func (progpow *Progpow) CalcOrder(header *types.WorkObject) (*big.Int, int, erro
// the given header determines the prime block
totalDeltaSPrime := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX))
totalDeltaSPrime = new(big.Int).Add(totalDeltaSPrime, intrinsicS)
primeDeltaSTarget := new(big.Int).Div(params.PrimeEntropyTarget, big2)
primeDeltaSTarget := new(big.Int).Div(params.PrimeEntropyTarget(expansionNum), big2)
primeDeltaSTarget = new(big.Int).Mul(zoneThresholdS, primeDeltaSTarget)

primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.PrimeEntropyTarget))
primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.PrimeEntropyTarget(expansionNum)))
if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaSPrime.Cmp(primeDeltaSTarget) > 0 {
return intrinsicS, common.PRIME_CTX, nil
}

// REGION
// Compute the total accumulated entropy since the last region block
totalDeltaSRegion := new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS)
regionDeltaSTarget := new(big.Int).Div(params.RegionEntropyTarget, big2)
regionDeltaSTarget := new(big.Int).Div(params.RegionEntropyTarget(expansionNum), big2)
regionDeltaSTarget = new(big.Int).Mul(zoneThresholdS, regionDeltaSTarget)
regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.RegionEntropyTarget))
regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(params.RegionEntropyTarget(expansionNum)))
if intrinsicS.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaSRegion.Cmp(regionDeltaSTarget) > 0 {
return intrinsicS, common.REGION_CTX, nil
}
Expand Down Expand Up @@ -262,7 +263,7 @@ func (progpow *Progpow) CalcRank(chain consensus.GenesisReader, header *types.Wo
for i := common.InterlinkDepth; i > 0; i-- {
extraBits := math.Pow(2, float64(i))
primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, common.BitsToBigBits(big.NewInt(int64(extraBits))))
primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget))
primeBlockEntropyThreshold = new(big.Int).Add(primeBlockEntropyThreshold, common.BitsToBigBits(params.PrimeEntropyTarget(header.ExpansionNumber())))
if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 {
return i, nil
}
Expand Down
11 changes: 7 additions & 4 deletions consensus/progpow/progpow.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,8 @@ type Config struct {
NotifyFull bool

Log *log.Logger `toml:"-"`
// Number of threads to mine on if mining
NumThreads int
}

// Progpow is a proof-of-work consensus engine using the blake3 hash algorithm
Expand Down Expand Up @@ -196,10 +198,11 @@ func New(config Config, notify []string, noverify bool, logger *log.Logger) *Pro
}).Info("Disk storage enabled for ethash caches")
}
progpow := &Progpow{
config: config,
caches: newlru("cache", config.CachesInMem, newCache, logger),
update: make(chan struct{}),
logger: logger,
config: config,
caches: newlru("cache", config.CachesInMem, newCache, logger),
update: make(chan struct{}),
logger: logger,
threads: config.NumThreads,
}
if config.PowMode == ModeShared {
progpow.shared = sharedProgpow
Expand Down
15 changes: 13 additions & 2 deletions params/protocol_params.go
Original file line number Diff line number Diff line change
Expand Up @@ -196,12 +196,23 @@ var (
TimeFactor = big.NewInt(7)
TimeToStartTx uint64 = 0 * BlocksPerDay
BlocksPerDay uint64 = new(big.Int).Div(big.NewInt(86400), DurationLimit).Uint64() // BlocksPerDay is the number of blocks per day assuming 12 second block time
PrimeEntropyTarget = big.NewInt(441) // This is TimeFactor*TimeFactor*common.NumZonesInRegion*common.NumRegionsInPrime
RegionEntropyTarget = big.NewInt(21) // This is TimeFactor*common.NumZonesInRegion
DifficultyAdjustmentPeriod = big.NewInt(360) // This is the number of blocks over which the average has to be taken
DifficultyAdjustmentFactor int64 = 40 // This is the factor that divides the log of the change in the difficulty
MinQuaiConversionAmount = new(big.Int).Mul(big.NewInt(1), big.NewInt(GWei)) // 0.000000001 Quai
MaxWorkShareCount = 16
WorkSharesThresholdDiff = 3 // Number of bits lower than the target that the default consensus engine uses
WorkSharesInclusionDepth = 7 // Number of blocks upto which the work shares can be referenced and this is protocol enforced
)

// This is TimeFactor*TimeFactor*common.NumZonesInRegion*common.NumRegionsInPrime
func PrimeEntropyTarget(expansionNum uint8) *big.Int {
numRegions, numZones := common.GetHierarchySizeForExpansionNumber(expansionNum)
timeFactorMultiplied := new(big.Int).Mul(TimeFactor, TimeFactor)
return new(big.Int).Mul(timeFactorMultiplied, new(big.Int).SetUint64(numZones*numRegions))
}

// This is TimeFactor*common.NumZonesInRegion
// RegionEntropyTarget returns the region-level entropy target for the
// given expansion number, computed as TimeFactor * numZonesInRegion,
// where the zone count is looked up from the expansion number.
func RegionEntropyTarget(expansionNum uint8) *big.Int {
	_, numZones := common.GetHierarchySizeForExpansionNumber(expansionNum)
	zoneCount := new(big.Int).SetUint64(numZones)
	return new(big.Int).Mul(TimeFactor, zoneCount)
}
Loading