From f22eeb71e5e39c9d425c16c89c137724b0d4baaf Mon Sep 17 00:00:00 2001 From: gop Date: Wed, 19 Jul 2023 10:41:40 -0500 Subject: [PATCH] Added the blake3 consensus engine back in If in flags consensus.engine is set to "blake3", blake3 consensus engine is initialized in the backend instead of the progpow, but the progpow will remain as the default --- Makefile | 4 + cmd/go-quai/main.go | 1 + cmd/go-quai/usage.go | 6 + cmd/utils/flags.go | 130 +++++-- consensus/blake3pow/api.go | 112 ++++++ consensus/blake3pow/blake3pow.go | 260 +++++++++++++ consensus/blake3pow/blake3pow_test.go | 158 ++++++++ consensus/blake3pow/consensus.go | 518 ++++++++++++++++++++++++++ consensus/blake3pow/consensus_test.go | 146 ++++++++ consensus/blake3pow/poem.go | 130 +++++++ consensus/blake3pow/sealer.go | 423 +++++++++++++++++++++ consensus/blake3pow/sealer_test.go | 279 ++++++++++++++ core/genesis.go | 101 +++-- core/types/block.go | 1 + eth/backend.go | 18 +- eth/ethconfig/config.go | 31 +- network.env.dist | 1 + params/bootnodes.go | 20 +- params/config.go | 91 ++++- 19 files changed, 2340 insertions(+), 90 deletions(-) create mode 100644 consensus/blake3pow/api.go create mode 100644 consensus/blake3pow/blake3pow.go create mode 100644 consensus/blake3pow/blake3pow_test.go create mode 100644 consensus/blake3pow/consensus.go create mode 100644 consensus/blake3pow/consensus_test.go create mode 100644 consensus/blake3pow/poem.go create mode 100644 consensus/blake3pow/sealer.go create mode 100644 consensus/blake3pow/sealer_test.go diff --git a/Makefile b/Makefile index 7f1600b114..f32734cc47 100644 --- a/Makefile +++ b/Makefile @@ -68,6 +68,10 @@ ifeq ($(SHOW_COLORS),true) BASE_CMD += --showcolors endif +ifeq ($(RUN_BLAKE3),true) + BASE_CMD += --consensus.engine "blake3" +endif + # Build suburl strings for slice specific subclient groups # WARNING: Only connect to dom/sub clients over a trusted network. 
ifeq ($(REGION),2) diff --git a/cmd/go-quai/main.go b/cmd/go-quai/main.go index 133865d19d..b4eda6ecbf 100644 --- a/cmd/go-quai/main.go +++ b/cmd/go-quai/main.go @@ -65,6 +65,7 @@ var ( utils.CacheTrieJournalFlag, utils.CacheTrieRejournalFlag, utils.ColosseumFlag, + utils.ConsensusEngineFlag, utils.DNSDiscoveryFlag, utils.DataDirFlag, utils.DeveloperFlag, diff --git a/cmd/go-quai/usage.go b/cmd/go-quai/usage.go index 3b5085328b..f6bc1e25a8 100644 --- a/cmd/go-quai/usage.go +++ b/cmd/go-quai/usage.go @@ -150,6 +150,12 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.MinerEtherbaseFlag, }, }, + { + Name: "CONSENSUS", + Flags: []cli.Flag{ + utils.ConsensusEngineFlag, + }, + }, { Name: "GAS PRICE ORACLE", Flags: []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 731c27b2af..081dba2782 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -39,6 +39,7 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/common/fdlimit" "github.com/dominant-strategies/go-quai/consensus" + "github.com/dominant-strategies/go-quai/consensus/blake3pow" "github.com/dominant-strategies/go-quai/consensus/progpow" "github.com/dominant-strategies/go-quai/core" "github.com/dominant-strategies/go-quai/core/rawdb" @@ -343,6 +344,12 @@ var ( Name: "cache.preimages", Usage: "Enable recording the SHA3/keccak preimages of trie keys", } + // Consensus settings + ConsensusEngineFlag = cli.StringFlag{ + Name: "consensus.engine", + Usage: "Consensus engine that the blockchain will run and verify blocks using", + Value: "progpow", + } // Miner settings MinerGasPriceFlag = BigFlag{ Name: "miner.gasprice", @@ -1206,24 +1213,45 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { } } -func setProgpow(ctx *cli.Context, cfg *ethconfig.Config) { - // Override any default configs for hard coded networks. 
- switch { - case ctx.GlobalBool(ColosseumFlag.Name): - cfg.Progpow.DurationLimit = params.DurationLimit - case ctx.GlobalBool(GardenFlag.Name): - cfg.Progpow.DurationLimit = params.GardenDurationLimit - case ctx.GlobalBool(OrchardFlag.Name): - cfg.Progpow.DurationLimit = params.OrchardDurationLimit - case ctx.GlobalBool(GalenaFlag.Name): - cfg.Progpow.DurationLimit = params.GalenaDurationLimit - case ctx.GlobalBool(LocalFlag.Name): - cfg.Progpow.DurationLimit = params.LocalDurationLimit - case ctx.GlobalBool(DeveloperFlag.Name): - cfg.Progpow.DurationLimit = params.DurationLimit - default: - cfg.Progpow.DurationLimit = params.DurationLimit +func setDurationLimit(ctx *cli.Context, cfg *ethconfig.Config) { + if cfg.ConsensusEngine == "blake3" { + // Override any default configs for hard coded networks. + switch { + case ctx.GlobalBool(ColosseumFlag.Name): + cfg.Blake3Pow.DurationLimit = params.DurationLimit + case ctx.GlobalBool(GardenFlag.Name): + cfg.Blake3Pow.DurationLimit = params.GardenDurationLimit + case ctx.GlobalBool(OrchardFlag.Name): + cfg.Blake3Pow.DurationLimit = params.OrchardDurationLimit + case ctx.GlobalBool(GalenaFlag.Name): + cfg.Blake3Pow.DurationLimit = params.GalenaDurationLimit + case ctx.GlobalBool(LocalFlag.Name): + cfg.Blake3Pow.DurationLimit = params.LocalDurationLimit + case ctx.GlobalBool(DeveloperFlag.Name): + cfg.Blake3Pow.DurationLimit = params.DurationLimit + default: + cfg.Blake3Pow.DurationLimit = params.DurationLimit + + } + } else { + // Override any default configs for hard coded networks. 
+ switch { + case ctx.GlobalBool(ColosseumFlag.Name): + cfg.Progpow.DurationLimit = params.DurationLimit + case ctx.GlobalBool(GardenFlag.Name): + cfg.Progpow.DurationLimit = params.GardenDurationLimit + case ctx.GlobalBool(OrchardFlag.Name): + cfg.Progpow.DurationLimit = params.OrchardDurationLimit + case ctx.GlobalBool(GalenaFlag.Name): + cfg.Progpow.DurationLimit = params.GalenaDurationLimit + case ctx.GlobalBool(LocalFlag.Name): + cfg.Progpow.DurationLimit = params.LocalDurationLimit + case ctx.GlobalBool(DeveloperFlag.Name): + cfg.Progpow.DurationLimit = params.DurationLimit + default: + cfg.Progpow.DurationLimit = params.DurationLimit + } } } @@ -1363,7 +1391,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } setGPO(ctx, &cfg.GPO, ctx.GlobalString(SyncModeFlag.Name) == "light") setTxPool(ctx, &cfg.TxPool) - setProgpow(ctx, cfg) + + // If blake3 consensus engine is specifically asked use the blake3 engine + if ctx.GlobalString(ConsensusEngineFlag.Name) == "blake3" { + cfg.ConsensusEngine = "blake3" + } else { + cfg.ConsensusEngine = "progpow" + } + setDurationLimit(ctx, cfg) + setWhitelist(ctx, cfg) // set the dominant chain websocket url @@ -1489,32 +1525,52 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1 } - cfg.Genesis = core.DefaultColosseumGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.ColosseumGenesisHash) + cfg.Genesis = core.DefaultColosseumGenesisBlock(cfg.ConsensusEngine) + if cfg.ConsensusEngine == "blake3" { + SetDNSDiscoveryDefaults(cfg, params.Blake3PowGardenGenesisHash) + } else { + SetDNSDiscoveryDefaults(cfg, params.ProgpowGardenGenesisHash) + } case ctx.GlobalBool(GardenFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 2 } - cfg.Genesis = core.DefaultGardenGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.GardenGenesisHash) + cfg.Genesis = 
core.DefaultGardenGenesisBlock(cfg.ConsensusEngine) + if cfg.ConsensusEngine == "blake3" { + SetDNSDiscoveryDefaults(cfg, params.Blake3PowGardenGenesisHash) + } else { + SetDNSDiscoveryDefaults(cfg, params.ProgpowGardenGenesisHash) + } case ctx.GlobalBool(OrchardFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 3 } - cfg.Genesis = core.DefaultOrchardGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.OrchardGenesisHash) + cfg.Genesis = core.DefaultOrchardGenesisBlock(cfg.ConsensusEngine) + if cfg.ConsensusEngine == "blake3" { + SetDNSDiscoveryDefaults(cfg, params.Blake3PowOrchardGenesisHash) + } else { + SetDNSDiscoveryDefaults(cfg, params.ProgpowOrchardGenesisHash) + } case ctx.GlobalBool(LocalFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 4 } - cfg.Genesis = core.DefaultLocalGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.LocalGenesisHash) + cfg.Genesis = core.DefaultLocalGenesisBlock(cfg.ConsensusEngine) + if cfg.ConsensusEngine == "blake3" { + SetDNSDiscoveryDefaults(cfg, params.Blake3PowLocalGenesisHash) + } else { + SetDNSDiscoveryDefaults(cfg, params.ProgpowLocalGenesisHash) + } case ctx.GlobalBool(GalenaFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 5 } - cfg.Genesis = core.DefaultGalenaGenesisBlock() - SetDNSDiscoveryDefaults(cfg, params.GalenaGenesisHash) + cfg.Genesis = core.DefaultGalenaGenesisBlock(cfg.ConsensusEngine) + if cfg.ConsensusEngine == "blake3" { + SetDNSDiscoveryDefaults(cfg, params.Blake3PowGalenaGenesisHash) + } else { + SetDNSDiscoveryDefaults(cfg, params.ProgpowGalenaGenesisHash) + } case ctx.GlobalBool(DeveloperFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 @@ -1535,7 +1591,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { } default: if cfg.NetworkId == 1 { - SetDNSDiscoveryDefaults(cfg, params.ColosseumGenesisHash) + SetDNSDiscoveryDefaults(cfg, params.ProgpowColosseumGenesisHash) } } if 
!ctx.GlobalBool(ColosseumFlag.Name) { @@ -1641,15 +1697,15 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis { var genesis *core.Genesis switch { case ctx.GlobalBool(ColosseumFlag.Name): - genesis = core.DefaultColosseumGenesisBlock() + genesis = core.DefaultColosseumGenesisBlock(ctx.GlobalString(ConsensusEngineFlag.Name)) case ctx.GlobalBool(GardenFlag.Name): - genesis = core.DefaultGardenGenesisBlock() + genesis = core.DefaultGardenGenesisBlock(ctx.GlobalString(ConsensusEngineFlag.Name)) case ctx.GlobalBool(OrchardFlag.Name): - genesis = core.DefaultOrchardGenesisBlock() + genesis = core.DefaultOrchardGenesisBlock(ctx.GlobalString(ConsensusEngineFlag.Name)) case ctx.GlobalBool(GalenaFlag.Name): - genesis = core.DefaultGalenaGenesisBlock() + genesis = core.DefaultGalenaGenesisBlock(ctx.GlobalString(ConsensusEngineFlag.Name)) case ctx.GlobalBool(LocalFlag.Name): - genesis = core.DefaultLocalGenesisBlock() + genesis = core.DefaultLocalGenesisBlock(ctx.GlobalString(ConsensusEngineFlag.Name)) case ctx.GlobalBool(DeveloperFlag.Name): Fatalf("Developer chains are ephemeral") } @@ -1670,6 +1726,12 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (*core.Core, ethdb.Database) if !ctx.GlobalBool(FakePoWFlag.Name) { engine = progpow.New(progpow.Config{}, nil, false) } + + // If blake3 consensus engine is selected use the blake3 engine + if ctx.GlobalString(ConsensusEngineFlag.Name) == "blake3" { + engine = blake3pow.New(blake3pow.Config{}, nil, false) + } + if gcmode := ctx.GlobalString(GCModeFlag.Name); gcmode != "full" && gcmode != "archive" { Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } diff --git a/consensus/blake3pow/api.go b/consensus/blake3pow/api.go new file mode 100644 index 0000000000..622fe78621 --- /dev/null +++ b/consensus/blake3pow/api.go @@ -0,0 +1,112 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package blake3pow + +import ( + "errors" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/hexutil" + "github.com/dominant-strategies/go-quai/core/types" +) + +var errBlake3powStopped = errors.New("blake3pow stopped") + +// API exposes blake3pow related methods for the RPC interface. +type API struct { + blake3pow *Blake3pow +} + +// GetWork returns a work package for external miner. 
+// +// The work package consists of 4 strings: +// +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3] - hex encoded block number +func (api *API) GetWork() ([4]string, error) { + if api.blake3pow.remote == nil { + return [4]string{}, errors.New("not supported") + } + + var ( + workCh = make(chan [4]string, 1) + errc = make(chan error, 1) + ) + select { + case api.blake3pow.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}: + case <-api.blake3pow.remote.exitCh: + return [4]string{}, errBlake3powStopped + } + select { + case work := <-workCh: + return work, nil + case err := <-errc: + return [4]string{}, err + } +} + +// SubmitWork can be used by external miner to submit their POW solution. +// It returns an indication if the work was accepted. +// Note either an invalid solution, a stale work or a non-existent work will return false. +func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool { + if api.blake3pow.remote == nil { + return false + } + + var errc = make(chan error, 1) + select { + case api.blake3pow.remote.submitWorkCh <- &mineResult{ + nonce: nonce, + hash: hash, + errc: errc, + }: + case <-api.blake3pow.remote.exitCh: + return false + } + err := <-errc + return err == nil +} + +// SubmitHashrate can be used for remote miners to submit their hash rate. +// This enables the node to report the combined hash rate of all miners +// which submit work through this node. +// +// It accepts the miner hash rate and an identifier which must be unique +// between nodes. 
+func (api *API) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool { + if api.blake3pow.remote == nil { + return false + } + + var done = make(chan struct{}, 1) + select { + case api.blake3pow.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: + case <-api.blake3pow.remote.exitCh: + return false + } + + // Block until hash rate submitted successfully. + <-done + return true +} + +// Gblake3powrate returns the current hashrate for local CPU miner and remote miner. +func (api *API) Gblake3powrate() uint64 { + return uint64(api.blake3pow.Hashrate()) +} diff --git a/consensus/blake3pow/blake3pow.go b/consensus/blake3pow/blake3pow.go new file mode 100644 index 0000000000..f5ea303a62 --- /dev/null +++ b/consensus/blake3pow/blake3pow.go @@ -0,0 +1,260 @@ +package blake3pow + +import ( + "math/big" + "math/rand" + "sync" + "time" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/hexutil" + "github.com/dominant-strategies/go-quai/consensus" + "github.com/dominant-strategies/go-quai/log" + "github.com/dominant-strategies/go-quai/metrics" + "github.com/dominant-strategies/go-quai/rpc" +) + +var ( + // sharedBlake3pow is a full instance that can be shared between multiple users. + sharedBlake3pow *Blake3pow +) + +func init() { + sharedConfig := Config{ + PowMode: ModeNormal, + } + sharedBlake3pow = New(sharedConfig, nil, false) +} + +// Mode defines the type and amount of PoW verification a blake3pow engine makes. +type Mode uint + +const ( + ModeNormal Mode = iota + ModeShared + ModeTest + ModeFake + ModeFullFake +) + +// Config are the configuration parameters of the blake3pow. +type Config struct { + PowMode Mode + + DurationLimit *big.Int + + // When set, notifications sent by the remote sealer will + // be block header JSON objects instead of work package arrays. 
+ NotifyFull bool + + Log *log.Logger `toml:"-"` +} + +// Blake3pow is a proof-of-work consensus engine using the blake3 hash algorithm +type Blake3pow struct { + config Config + + // Mining related fields + rand *rand.Rand // Properly seeded random source for nonces + threads int // Number of threads to mine on if mining + update chan struct{} // Notification channel to update mining parameters + hashrate metrics.Meter // Meter tracking the average hashrate + remote *remoteSealer + + // The fields below are hooks for testing + shared *Blake3pow // Shared PoW verifier to avoid cache regeneration + fakeFail uint64 // Block number which fails PoW check even in fake mode + fakeDelay time.Duration // Time delay to sleep for before returning from verify + + lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields + closeOnce sync.Once // Ensures exit channel will not be closed twice. +} + +// New creates a full sized blake3pow PoW scheme and starts a background thread for +// remote mining, also optionally notifying a batch of remote services of new work +// packages. +func New(config Config, notify []string, noverify bool) *Blake3pow { + if config.Log == nil { + config.Log = &log.Log + } + blake3pow := &Blake3pow{ + config: config, + update: make(chan struct{}), + hashrate: metrics.NewMeterForced(), + } + if config.PowMode == ModeShared { + blake3pow.shared = sharedBlake3pow + } + blake3pow.remote = startRemoteSealer(blake3pow, notify, noverify) + return blake3pow +} + +// NewTester creates a small sized blake3pow PoW scheme useful only for testing +// purposes. +func NewTester(notify []string, noverify bool) *Blake3pow { + return New(Config{PowMode: ModeTest}, notify, noverify) +} + +// NewFaker creates a blake3pow consensus engine with a fake PoW scheme that accepts +// all blocks' seal as valid, though they still have to conform to the Quai +// consensus rules. 
+func NewFaker() *Blake3pow { + return &Blake3pow{ + config: Config{ + PowMode: ModeFake, + Log: &log.Log, + }, + } +} + +// NewFakeFailer creates a blake3pow consensus engine with a fake PoW scheme that +// accepts all blocks as valid apart from the single one specified, though they +// still have to conform to the Quai consensus rules. +func NewFakeFailer(fail uint64) *Blake3pow { + return &Blake3pow{ + config: Config{ + PowMode: ModeFake, + Log: &log.Log, + }, + fakeFail: fail, + } +} + +// NewFakeDelayer creates a blake3pow consensus engine with a fake PoW scheme that +// accepts all blocks as valid, but delays verifications by some time, though +// they still have to conform to the Quai consensus rules. +func NewFakeDelayer(delay time.Duration) *Blake3pow { + return &Blake3pow{ + config: Config{ + PowMode: ModeFake, + Log: &log.Log, + }, + fakeDelay: delay, + } +} + +// NewFullFaker creates an blake3pow consensus engine with a full fake scheme that +// accepts all blocks as valid, without checking any consensus rules whatsoever. +func NewFullFaker() *Blake3pow { + return &Blake3pow{ + config: Config{ + PowMode: ModeFullFake, + Log: &log.Log, + }, + } +} + +// NewShared creates a full sized blake3pow PoW shared between all requesters running +// in the same process. +func NewShared() *Blake3pow { + return &Blake3pow{shared: sharedBlake3pow} +} + +// Close closes the exit channel to notify all backend threads exiting. +func (blake3pow *Blake3pow) Close() error { + blake3pow.closeOnce.Do(func() { + // Short circuit if the exit channel is not allocated. + if blake3pow.remote == nil { + return + } + close(blake3pow.remote.requestExit) + <-blake3pow.remote.exitCh + }) + return nil +} + +// Threads returns the number of mining threads currently enabled. This doesn't +// necessarily mean that mining is running! 
+func (blake3pow *Blake3pow) Threads() int { + blake3pow.lock.Lock() + defer blake3pow.lock.Unlock() + + return blake3pow.threads +} + +// SetThreads updates the number of mining threads currently enabled. Calling +// this method does not start mining, only sets the thread count. If zero is +// specified, the miner will use all cores of the machine. Setting a thread +// count below zero is allowed and will cause the miner to idle, without any +// work being done. +func (blake3pow *Blake3pow) SetThreads(threads int) { + blake3pow.lock.Lock() + defer blake3pow.lock.Unlock() + + if blake3pow.shared != nil { + // If we're running a shared PoW, set the thread count on that instead + blake3pow.shared.SetThreads(threads) + } else { + // Update the threads and ping any running seal to pull in any changes + blake3pow.threads = threads + select { + case blake3pow.update <- struct{}{}: + default: + } + } +} + +// Hashrate implements PoW, returning the measured rate of the search invocations +// per second over the last minute. +// Note the returned hashrate includes local hashrate, but also includes the total +// hashrate of all remote miner. +func (blake3pow *Blake3pow) Hashrate() float64 { + // Short circuit if we are run the blake3pow in normal/test mode. + if blake3pow.config.PowMode != ModeNormal && blake3pow.config.PowMode != ModeTest { + return blake3pow.hashrate.Rate1() + } + var res = make(chan uint64, 1) + + select { + case blake3pow.remote.fetchRateCh <- res: + case <-blake3pow.remote.exitCh: + // Return local hashrate only if blake3pow is stopped. + return blake3pow.hashrate.Rate1() + } + + // Gather total submitted hash rate of remote sealers. + return blake3pow.hashrate.Rate1() + float64(<-res) +} + +// SubmitHashrate can be used for remote miners to submit their hash rate. +// This enables the node to report the combined hash rate of all miners +// which submit work through this node. 
+// +// It accepts the miner hash rate and an identifier which must be unique +// between nodes. +func (blake3pow *Blake3pow) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool { + if blake3pow.remote == nil { + return false + } + + var done = make(chan struct{}, 1) + select { + case blake3pow.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}: + case <-blake3pow.remote.exitCh: + return false + } + + // Block until hash rate submitted successfully. + <-done + return true +} + +// APIs implements consensus.Engine, returning the user facing RPC APIs. +func (blake3pow *Blake3pow) APIs(chain consensus.ChainHeaderReader) []rpc.API { + // In order to ensure backward compatibility, we exposes blake3pow RPC APIs + // to both eth and blake3pow namespaces. + return []rpc.API{ + { + Namespace: "eth", + Version: "1.0", + Service: &API{blake3pow}, + Public: true, + }, + { + Namespace: "blake3pow", + Version: "1.0", + Service: &API{blake3pow}, + Public: true, + }, + } +} diff --git a/consensus/blake3pow/blake3pow_test.go b/consensus/blake3pow/blake3pow_test.go new file mode 100644 index 0000000000..4d03eb1a06 --- /dev/null +++ b/consensus/blake3pow/blake3pow_test.go @@ -0,0 +1,158 @@ +package blake3pow + +import ( + "io/ioutil" + "math/big" + "math/rand" + "os" + "sync" + "testing" + "time" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/hexutil" + "github.com/dominant-strategies/go-quai/core/types" +) + +// Tests that blake3pow works correctly in test mode. 
+func TestTestMode(t *testing.T) { + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + + blake3pow := NewTester(nil, false) + defer blake3pow.Close() + + results := make(chan *types.Block) + err := blake3pow.Seal(nil, types.NewBlockWithHeader(header), results, nil) + if err != nil { + t.Fatalf("failed to seal block: %v", err) + } + select { + case block := <-results: + header.Nonce() = types.EncodeNonce(block.Nonce()) + if err := blake3pow.verifySeal(nil, header, false); err != nil { + t.Fatalf("unexpected verification error: %v", err) + } + case <-time.NewTimer(4 * time.Second).C: + t.Error("sealing result timeout") + } +} + +// This test checks that cache lru logic doesn't crash under load. +func TestCacheFileEvict(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "blake3pow-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + config := Config{ + CachesInMem: 3, + CachesOnDisk: 10, + CacheDir: tmpdir, + PowMode: ModeTest, + } + e := New(config, nil, false) + defer e.Close() + + workers := 8 + epochs := 100 + var wg sync.WaitGroup + wg.Add(workers) + for i := 0; i < workers; i++ { + go verifyTest(&wg, e, i, epochs) + } + wg.Wait() +} + +func verifyTest(wg *sync.WaitGroup, e *Blake3pow, workerIndex, epochs int) { + defer wg.Done() + + const wiggle = 4 * epochLength + r := rand.New(rand.NewSource(int64(workerIndex))) + for epoch := 0; epoch < epochs; epoch++ { + block := int64(epoch)*epochLength - wiggle/2 + r.Int63n(wiggle) + if block < 0 { + block = 0 + } + header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} + e.verifySeal(nil, header, false) + } +} + +func TestRemoteSealer(t *testing.T) { + blake3pow := NewTester(nil, false) + defer blake3pow.Close() + + api := &API{blake3pow} + if _, err := api.GetWork(); err != errNoMiningWork { + t.Error("expect to return an error indicate there is no mining work") + } + header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} 
+ block := types.NewBlockWithHeader(header) + sealhash := blake3pow.SealHash(header) + + // Push new work. + results := make(chan *types.Block) + blake3pow.Seal(nil, block, results, nil) + + var ( + work [4]string + err error + ) + if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() { + t.Error("expect to return a mining work has same hash") + } + + if res := api.SubmitWork(types.BlockNonce{}, sealhash, common.Hash{}); res { + t.Error("expect to return false when submit a fake solution") + } + // Push new block with same block number to replace the original one. + header = &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(1000)} + block = types.NewBlockWithHeader(header) + sealhash = blake3pow.SealHash(header) + blake3pow.Seal(nil, block, results, nil) + + if work, err = api.GetWork(); err != nil || work[0] != sealhash.Hex() { + t.Error("expect to return the latest pushed work") + } +} + +func TestHashrate(t *testing.T) { + var ( + hashrate = []hexutil.Uint64{100, 200, 300} + expect uint64 + ids = []common.Hash{common.HexToHash("a"), common.HexToHash("b"), common.HexToHash("c")} + ) + blake3pow := NewTester(nil, false) + defer blake3pow.Close() + + if tot := blake3pow.Hashrate(); tot != 0 { + t.Error("expect the result should be zero") + } + + api := &API{blake3pow} + for i := 0; i < len(hashrate); i += 1 { + if res := api.SubmitHashrate(hashrate[i], ids[i]); !res { + t.Error("remote miner submit hashrate failed") + } + expect += uint64(hashrate[i]) + } + if tot := blake3pow.Hashrate(); tot != float64(expect) { + t.Error("expect total hashrate should be same") + } +} + +func TestClosedRemoteSealer(t *testing.T) { + blake3pow := NewTester(nil, false) + time.Sleep(1 * time.Second) // ensure exit channel is listening + blake3pow.Close() + + api := &API{blake3pow} + if _, err := api.GetWork(); err != errBlake3powStopped { + t.Error("expect to return an error to indicate blake3pow is stopped") + } + + if res := 
api.SubmitHashrate(hexutil.Uint64(100), common.HexToHash("a")); res { + t.Error("expect to return false when submit hashrate to a stopped blake3pow") + } +} diff --git a/consensus/blake3pow/consensus.go b/consensus/blake3pow/consensus.go new file mode 100644 index 0000000000..d3ebb55712 --- /dev/null +++ b/consensus/blake3pow/consensus.go @@ -0,0 +1,518 @@ +package blake3pow + +import ( + "errors" + "fmt" + "math/big" + "runtime" + "time" + + mapset "github.com/deckarep/golang-set" + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/consensus" + "github.com/dominant-strategies/go-quai/consensus/misc" + "github.com/dominant-strategies/go-quai/core" + "github.com/dominant-strategies/go-quai/core/state" + "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/log" + "github.com/dominant-strategies/go-quai/params" + "github.com/dominant-strategies/go-quai/trie" +) + +// Blake3pow proof-of-work protocol constants. +var ( + maxUncles = 2 // Maximum number of uncles allowed in a single block + allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks + + ContextTimeFactor = big10 + ZoneBlockReward = big.NewInt(5e+18) + RegionBlockReward = new(big.Int).Mul(ZoneBlockReward, big3) + PrimeBlockReward = new(big.Int).Mul(RegionBlockReward, big3) +) + +// Some useful constants to avoid constant memory allocs for them. +var ( + expDiffPeriod = big.NewInt(100000) + big0 = big.NewInt(0) + big1 = big.NewInt(1) + big2 = big.NewInt(2) + big3 = big.NewInt(3) + big8 = big.NewInt(8) + big9 = big.NewInt(9) + big10 = big.NewInt(10) + big32 = big.NewInt(32) + bigMinus99 = big.NewInt(-99) + big2e256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0)) // 2^256 +) + +// Various error messages to mark blocks invalid. 
These should be private to +// prevent engine specific errors from being referenced in the remainder of the +// codebase, inherently breaking if the engine is swapped out. Please put common +// error types into the consensus package. +var ( + errOlderBlockTime = errors.New("timestamp older than parent") + errTooManyUncles = errors.New("too many uncles") + errDuplicateUncle = errors.New("duplicate uncle") + errUncleIsAncestor = errors.New("uncle is ancestor") + errDanglingUncle = errors.New("uncle's parent is not ancestor") + errInvalidDifficulty = errors.New("non-positive difficulty") + errDifficultyCrossover = errors.New("sub's difficulty exceeds dom's") + errInvalidPoW = errors.New("invalid proof-of-work") + errInvalidOrder = errors.New("invalid order") +) + +// Author implements consensus.Engine, returning the header's coinbase as the +// proof-of-work verified author of the block. +func (blake3pow *Blake3pow) Author(header *types.Header) (common.Address, error) { + return header.Coinbase(), nil +} + +// VerifyHeader checks whether a header conforms to the consensus rules of the +// stock Quai blake3pow engine. +func (blake3pow *Blake3pow) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { + // If we're running a full engine faking, accept any input as valid + if blake3pow.config.PowMode == ModeFullFake { + return nil + } + // Short circuit if the header is known, or its parent not + number := header.NumberU64() + if chain.GetHeader(header.Hash(), number) != nil { + return nil + } + parent := chain.GetHeader(header.ParentHash(), number-1) + if parent == nil { + return consensus.ErrUnknownAncestor + } + // Sanity checks passed, do a proper verification + return blake3pow.verifyHeader(chain, header, parent, false, seal, time.Now().Unix()) +} + +// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers +// concurrently. 
The method returns a quit channel to abort the operations and +// a results channel to retrieve the async verifications. +func (blake3pow *Blake3pow) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { + // If we're running a full engine faking, accept any input as valid + if blake3pow.config.PowMode == ModeFullFake || len(headers) == 0 { + abort, results := make(chan struct{}), make(chan error, len(headers)) + for i := 0; i < len(headers); i++ { + results <- nil + } + return abort, results + } + + // Spawn as many workers as allowed threads + workers := runtime.GOMAXPROCS(0) + if len(headers) < workers { + workers = len(headers) + } + + // Create a task channel and spawn the verifiers + var ( + inputs = make(chan int) + done = make(chan int, workers) + errors = make([]error, len(headers)) + abort = make(chan struct{}) + unixNow = time.Now().Unix() + ) + for i := 0; i < workers; i++ { + go func() { + for index := range inputs { + errors[index] = blake3pow.verifyHeaderWorker(chain, headers, seals, index, unixNow) + done <- index + } + }() + } + + errorsOut := make(chan error, len(headers)) + go func() { + defer close(inputs) + var ( + in, out = 0, 0 + checked = make([]bool, len(headers)) + inputs = inputs + ) + for { + select { + case inputs <- in: + if in++; in == len(headers) { + // Reached end of headers. Stop sending to workers. 
+ inputs = nil + } + case index := <-done: + for checked[index] = true; checked[out]; out++ { + errorsOut <- errors[out] + if out == len(headers)-1 { + return + } + } + case <-abort: + return + } + } + }() + return abort, errorsOut +} + +func (blake3pow *Blake3pow) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int, unixNow int64) error { + var parent *types.Header + if index == 0 { + parent = chain.GetHeader(headers[0].ParentHash(), headers[0].NumberU64()-1) + } else if headers[index-1].Hash() == headers[index].ParentHash() { + parent = headers[index-1] + } + if parent == nil { + return consensus.ErrUnknownAncestor + } + return blake3pow.verifyHeader(chain, headers[index], parent, false, seals[index], unixNow) +} + +// VerifyUncles verifies that the given block's uncles conform to the consensus +// rules of the stock Quai blake3pow engine. +func (blake3pow *Blake3pow) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { + // If we're running a full engine faking, accept any input as valid + if blake3pow.config.PowMode == ModeFullFake { + return nil + } + // Verify that there are at most 2 uncles included in this block + if len(block.Uncles()) > maxUncles { + return errTooManyUncles + } + if len(block.Uncles()) == 0 { + return nil + } + // Gather the set of past uncles and ancestors + uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header) + + number, parent := block.NumberU64()-1, block.ParentHash() + for i := 0; i < 7; i++ { + ancestorHeader := chain.GetHeader(parent, number) + if ancestorHeader == nil { + break + } + ancestors[parent] = ancestorHeader + // If the ancestor doesn't have any uncles, we don't have to iterate them + if ancestorHeader.UncleHash() != types.EmptyUncleHash { + // Need to add those uncles to the banned list too + ancestor := chain.GetBlock(parent, number) + if ancestor == nil { + break + } + for _, uncle := range ancestor.Uncles() { + 
uncles.Add(uncle.Hash()) + } + } + parent, number = ancestorHeader.ParentHash(), number-1 + } + ancestors[block.Hash()] = block.Header() + uncles.Add(block.Hash()) + + // Verify each of the uncles that it's recent, but not an ancestor + for _, uncle := range block.Uncles() { + // Make sure every uncle is rewarded only once + hash := uncle.Hash() + if uncles.Contains(hash) { + return errDuplicateUncle + } + uncles.Add(hash) + + // Make sure the uncle has a valid ancestry + if ancestors[hash] != nil { + return errUncleIsAncestor + } + if ancestors[uncle.ParentHash()] == nil || uncle.ParentHash() == block.ParentHash() { + return errDanglingUncle + } + if err := blake3pow.verifyHeader(chain, uncle, ancestors[uncle.ParentHash()], true, true, time.Now().Unix()); err != nil { + return err + } + } + return nil +} + +// verifyHeader checks whether a header conforms to the consensus rules +func (blake3pow *Blake3pow) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool, unixNow int64) error { + nodeCtx := common.NodeLocation.Context() + // Ensure that the header's extra-data section is of a reasonable size + if uint64(len(header.Extra())) > params.MaximumExtraDataSize { + return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra()), params.MaximumExtraDataSize) + } + // Verify the header's timestamp + if !uncle { + if header.Time() > uint64(unixNow+allowedFutureBlockTimeSeconds) { + return consensus.ErrFutureBlock + } + } + if header.Time() < parent.Time() { + return errOlderBlockTime + } + // Verify the block's difficulty based on its timestamp and parent's difficulty + // difficulty adjustment can only be checked in zone + if nodeCtx == common.ZONE_CTX { + expected := blake3pow.CalcDifficulty(chain, parent) + if expected.Cmp(header.Difficulty()) != 0 { + return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty(), expected) + } + } + + _, order, err := blake3pow.CalcOrder(header) + if err != nil { + 
return err + } + if order > nodeCtx { + return fmt.Errorf("order of the block is greater than the context") + } + + if !common.NodeLocation.InSameSliceAs(header.Location()) { + return fmt.Errorf("block location is not in the same slice as the node location") + } + + // Verify that the parent entropy is calculated correctly on the header + parentEntropy := blake3pow.TotalLogS(parent) + if parentEntropy.Cmp(header.ParentEntropy()) != 0 { + return fmt.Errorf("invalid parent entropy: have %v, want %v", header.ParentEntropy(), parentEntropy) + } + + // If not prime, verify the parentDeltaS field as well + if nodeCtx > common.PRIME_CTX { + _, parentOrder, _ := blake3pow.CalcOrder(parent) + // If parent was dom, deltaS is zero and otherwise should be the calc delta s on the parent + if parentOrder < nodeCtx { + if common.Big0.Cmp(header.ParentDeltaS()) != 0 { + return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), common.Big0) + } + } else { + parentDeltaS := blake3pow.DeltaLogS(parent) + if parentDeltaS.Cmp(header.ParentDeltaS()) != 0 { + return fmt.Errorf("invalid parent delta s: have %v, want %v", header.ParentDeltaS(), parentDeltaS) + } + } + } + + if nodeCtx == common.ZONE_CTX { + // Verify that the gas limit is <= 2^63-1 + cap := uint64(0x7fffffffffffffff) + if header.GasLimit() > cap { + return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit(), cap) + } + // Verify that the gasUsed is <= gasLimit + if header.GasUsed() > header.GasLimit() { + return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed(), header.GasLimit()) + } + // Verify the block's gas usage and verify the base fee. 
+ // Verify that the gas limit remains within allowed bounds + if err := misc.VerifyGaslimit(parent.GasLimit(), header.GasLimit()); err != nil { + return err + } + // Verify the header is not malformed + if header.BaseFee() == nil { + return fmt.Errorf("header is missing baseFee") + } + // Verify the baseFee is correct based on the parent header. + expectedBaseFee := misc.CalcBaseFee(chain.Config(), parent) + if header.BaseFee().Cmp(expectedBaseFee) != 0 { + return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d", + expectedBaseFee, header.BaseFee(), parent.BaseFee(), parent.GasUsed()) + } + } + // Verify that the block number is parent's +1 + if diff := new(big.Int).Sub(header.Number(), parent.Number()); diff.Cmp(big.NewInt(1)) != 0 { + return consensus.ErrInvalidNumber + } + // Verify the engine specific seal securing the block + if seal { + if err := blake3pow.verifySeal(chain, header, false); err != nil { + return err + } + } + return nil +} + +// CalcDifficulty is the difficulty adjustment algorithm. It returns +// the difficulty that a new block should have when created at time +// given the parent block's time and difficulty. 
+func (blake3pow *Blake3pow) CalcDifficulty(chain consensus.ChainHeaderReader, parent *types.Header) *big.Int { + nodeCtx := common.NodeLocation.Context() + + if nodeCtx != common.ZONE_CTX { + log.Error("Cannot CalcDifficulty for", "context", nodeCtx) + return nil + } + // algorithm: + // diff = (parent_diff + + // (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + // ) + 2^(periodCount - 2) + + time := parent.Time() + + if parent.Hash() == chain.Config().GenesisHash { + return parent.Difficulty() + } + + parentOfParent := chain.GetHeaderByHash(parent.ParentHash()) + + bigTime := new(big.Int).SetUint64(time) + bigParentTime := new(big.Int).SetUint64(parentOfParent.Time()) + + // holds intermediate values to make the algo easier to read & audit + x := new(big.Int) + y := new(big.Int) + + // (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // duration_limit + x.Sub(bigTime, bigParentTime) + x.Div(x, blake3pow.config.DurationLimit) + if parent.UncleHash() == types.EmptyUncleHash { + x.Sub(big1, x) + } else { + x.Sub(big2, x) + } + // max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99) + if x.Cmp(bigMinus99) < 0 { + x.Set(bigMinus99) + } + // parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + y.Div(parent.Difficulty(), params.DifficultyBoundDivisor) + x.Mul(y, x) + x.Add(parent.Difficulty(), x) + + // minimum difficulty can ever be (before exponential factor) + if x.Cmp(params.MinimumDifficulty) < 0 { + x.Set(params.MinimumDifficulty) + } + + return x +} + +func (blake3pow *Blake3pow) IsDomCoincident(chain consensus.ChainHeaderReader, header *types.Header) bool { + _, order, err := blake3pow.CalcOrder(header) + if err != nil { + return false + } + return order < common.NodeLocation.Context() +} + +// verifySeal checks whether a block satisfies the PoW difficulty requirements, +// either 
using the usual blake3pow cache for it, or alternatively using a full DAG +// to make remote mining fast. +func (blake3pow *Blake3pow) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error { + // If we're running a fake PoW, accept any seal as valid + if blake3pow.config.PowMode == ModeFake || blake3pow.config.PowMode == ModeFullFake { + time.Sleep(blake3pow.fakeDelay) + if blake3pow.fakeFail == header.Number().Uint64() { + return errInvalidPoW + } + return nil + } + // If we're running a shared PoW, delegate verification to it + if blake3pow.shared != nil { + return blake3pow.shared.verifySeal(chain, header, fulldag) + } + // Ensure that we have a valid difficulty for the block + if header.Difficulty().Sign() <= 0 { + return errInvalidDifficulty + } + // Check for valid zone share and order matches context + _, order, err := blake3pow.CalcOrder(header) + if err != nil { + return err + } else { + if order > common.NodeLocation.Context() { + return errInvalidOrder + } + } + + return nil +} + +// Prepare implements consensus.Engine, initializing the difficulty field of a +// header to conform to the blake3pow protocol. The changes are done inline. 
+func (blake3pow *Blake3pow) Prepare(chain consensus.ChainHeaderReader, header *types.Header, parent *types.Header) error { + header.SetDifficulty(blake3pow.CalcDifficulty(chain, parent)) + return nil +} + +// Finalize implements consensus.Engine, accumulating the block and uncle rewards, +// setting the final state on the header +func (blake3pow *Blake3pow) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) { + // Accumulate any block and uncle rewards and commit the final state root + accumulateRewards(chain.Config(), state, header, uncles) + + if common.NodeLocation.Context() == common.ZONE_CTX && header.ParentHash() == chain.Config().GenesisHash { + alloc := core.ReadGenesisAlloc("genallocs/gen_alloc_" + common.NodeLocation.Name() + ".json") + log.Info("Allocating genesis accounts", "num", len(alloc)) + + for addressString, account := range alloc { + addr := common.HexToAddress(addressString) + internal, err := addr.InternalAddress() + if err != nil { + log.Error("Provided address in genesis block is out of scope") + } + state.AddBalance(internal, account.Balance) + state.SetCode(internal, account.Code) + state.SetNonce(internal, account.Nonce) + for key, value := range account.Storage { + state.SetState(internal, key, value) + } + } + } + + header.SetRoot(state.IntermediateRoot(true)) +} + +// FinalizeAndAssemble implements consensus.Engine, accumulating the block and +// uncle rewards, setting the final state and assembling the block. 
+func (blake3pow *Blake3pow) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, etxs []*types.Transaction, subManifest types.BlockManifest, receipts []*types.Receipt) (*types.Block, error) { + nodeCtx := common.NodeLocation.Context() + if nodeCtx == common.ZONE_CTX { + // Finalize block + blake3pow.Finalize(chain, header, state, txs, uncles) + } + + // Header seems complete, assemble into a block and return + return types.NewBlock(header, txs, uncles, etxs, subManifest, receipts, trie.NewStackTrie(nil)), nil +} + +func (blake3pow *Blake3pow) ComputePowLight(header *types.Header) (common.Hash, common.Hash) { + return common.Hash{}, common.Hash{} +} + +// AccumulateRewards credits the coinbase of the given block with the mining +// reward. The total reward consists of the static block reward and rewards for +// included uncles. The coinbase of each uncle block is also rewarded. +func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { + // Select the correct block reward based on chain progression + blockReward := misc.CalculateReward() + + coinbase, err := header.Coinbase().InternalAddress() + if err != nil { + log.Error("Block has out of scope coinbase, skipping block reward", "Address", header.Coinbase().String(), "Hash", header.Hash().String()) + return + } + + // Accumulate the rewards for the miner and any included uncles + reward := new(big.Int).Set(blockReward) + r := new(big.Int) + for _, uncle := range uncles { + coinbase, err := uncle.Coinbase().InternalAddress() + if err != nil { + log.Error("Found uncle with out of scope coinbase, skipping reward", "Address", uncle.Coinbase().String(), "Hash", uncle.Hash().String()) + continue + } + r.Add(uncle.Number(), big8) + r.Sub(r, header.Number()) + r.Mul(r, blockReward) + r.Div(r, big8) + state.AddBalance(coinbase, r) + + r.Div(blockReward, big32) + 
reward.Add(reward, r) + } + state.AddBalance(coinbase, reward) +} + +func TargetToDifficulty(target *big.Int) *big.Int { + return new(big.Int).Div(big2e256, target) +} + +func DifficultyToTarget(difficulty *big.Int) *big.Int { + return TargetToDifficulty(difficulty) +} diff --git a/consensus/blake3pow/consensus_test.go b/consensus/blake3pow/consensus_test.go new file mode 100644 index 0000000000..0d7fbe310f --- /dev/null +++ b/consensus/blake3pow/consensus_test.go @@ -0,0 +1,146 @@ +package blake3pow + +import ( + "encoding/binary" + "encoding/json" + "math/big" + "math/rand" + "os" + "path/filepath" + "testing" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/math" + "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/params" +) + +type diffTest struct { + ParentTimestamp uint64 + ParentDifficulty *big.Int + CurrentTimestamp uint64 + CurrentBlocknumber *big.Int + CurrentDifficulty *big.Int +} + +func (d *diffTest) UnmarshalJSON(b []byte) (err error) { + var ext struct { + ParentTimestamp string + ParentDifficulty string + CurrentTimestamp string + CurrentBlocknumber string + CurrentDifficulty string + } + if err := json.Unmarshal(b, &ext); err != nil { + return err + } + + d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp) + d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty) + d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp) + d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber) + d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty) + + return nil +} + +func TestCalcDifficulty(t *testing.T) { + file, err := os.Open(filepath.Join("..", "..", "tests", "testdata", "BasicTests", "difficulty.json")) + if err != nil { + t.Skip(err) + } + defer file.Close() + + tests := make(map[string]diffTest) + err = json.NewDecoder(file).Decode(&tests) + if err != nil { + t.Fatal(err) + } + + config := &params.ChainConfig{} + + 
for name, test := range tests { + number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1)) + diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{ + Number: number, + Time: test.ParentTimestamp, + Difficulty: test.ParentDifficulty, + }) + if diff.Cmp(test.CurrentDifficulty) != 0 { + t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff) + } + } +} + +func randSlice(min, max uint32) []byte { + var b = make([]byte, 4) + rand.Read(b) + a := binary.LittleEndian.Uint32(b) + size := min + a%(max-min) + out := make([]byte, size) + rand.Read(out) + return out +} + +func TestDifficultyCalculators(t *testing.T) { + rand.Seed(2) + for i := 0; i < 5000; i++ { + // 1 to 300 seconds diff + var timeDelta = uint64(1 + rand.Uint32()%3000) + diffBig := big.NewInt(0).SetBytes(randSlice(2, 10)) + if diffBig.Cmp(params.MinimumDifficulty) < 0 { + diffBig.Set(params.MinimumDifficulty) + } + //rand.Read(difficulty) + header := &types.Header{ + Difficulty: diffBig, + Number: new(big.Int).SetUint64(rand.Uint64() % 50_000_000), + Time: rand.Uint64() - timeDelta, + } + if rand.Uint32()&1 == 0 { + header.SetUncleHash(types.EmptyUncleHash) + } + bombDelay := new(big.Int).SetUint64(rand.Uint64() % 50_000_000) + for i, pair := range []struct { + bigFn func(time uint64, parent *types.Header) *big.Int + u256Fn func(time uint64, parent *types.Header) *big.Int + }{ + {DynamicDifficultyCalculator(bombDelay), MakeDifficultyCalculatorU256(bombDelay)}, + } { + time := header.Time() + timeDelta + want := pair.bigFn(time, header) + have := pair.u256Fn(time, header) + if want.BitLen() > 256 { + continue + } + if want.Cmp(have) != 0 { + t.Fatalf("pair %d: want %x have %x\nparent.Number: %x\np.Time: %x\nc.Time: %x\nBombdelay: %v\n", i, want, have, + header.Number(), header.Time(), time, bombDelay) + } + } + } +} + +func BenchmarkDifficultyCalculator(b *testing.B) { + x1 := makeDifficultyCalculator(big.NewInt(1000000)) + x2 := 
MakeDifficultyCalculatorU256(big.NewInt(1000000)) + h := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: types.EmptyUncleHash, + Difficulty: big.NewInt(0xffffff), + Number: big.NewInt(500000), + Time: 1000000, + } + b.Run("big-generic", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + x1(1000014, h) + } + }) + b.Run("u256-generic", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + x2(1000014, h) + } + }) +} diff --git a/consensus/blake3pow/poem.go b/consensus/blake3pow/poem.go new file mode 100644 index 0000000000..9edc53be58 --- /dev/null +++ b/consensus/blake3pow/poem.go @@ -0,0 +1,130 @@ +package blake3pow + +import ( + "math/big" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/params" + "modernc.org/mathutil" +) + +// CalcOrder returns the order of the block within the hierarchy of chains +func (blake3pow *Blake3pow) CalcOrder(header *types.Header) (*big.Int, int, error) { + if header.NumberU64() == 0 { + return common.Big0, common.PRIME_CTX, nil + } + + // Verify the seal and get the powHash for the given header + err := blake3pow.verifySeal(nil, header, true) + if err != nil { + return big0, -1, err + } + + // Get entropy reduction of this header + intrinsicS := blake3pow.IntrinsicLogS(header.Hash()) + + // This is the updated the threshold calculation based on the zone difficulty threshold + target := new(big.Int).Div(big2e256, header.Difficulty()).Bytes() + zoneThresholdS := blake3pow.IntrinsicLogS(common.BytesToHash(target)) + timeFactorHierarchyDepthMultiple := new(big.Int).Mul(params.TimeFactor, big.NewInt(common.HierarchyDepth)) + + // Prime case + primeEntropyThreshold := new(big.Int).Mul(timeFactorHierarchyDepthMultiple, timeFactorHierarchyDepthMultiple) + primeEntropyThreshold = new(big.Int).Mul(primeEntropyThreshold, zoneThresholdS) + primeBlockThreshold := new(big.Int).Quo(primeEntropyThreshold, 
big.NewInt(2)) + primeEntropyThreshold = new(big.Int).Sub(primeEntropyThreshold, primeBlockThreshold) + + primeBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(primeBlockThreshold, 8) + primeBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(primeBlockEntropyThresholdAdder))) + + totalDeltaS := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) + totalDeltaS.Add(totalDeltaS, intrinsicS) + if intrinsicS.Cmp(primeBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(primeEntropyThreshold) > 0 { + return intrinsicS, common.PRIME_CTX, nil + } + + // Region case + regionEntropyThreshold := new(big.Int).Mul(timeFactorHierarchyDepthMultiple, zoneThresholdS) + regionBlockThreshold := new(big.Int).Quo(regionEntropyThreshold, big.NewInt(2)) + regionEntropyThreshold = new(big.Int).Sub(regionEntropyThreshold, regionBlockThreshold) + + regionBlockEntropyThresholdAdder, _ := mathutil.BinaryLog(regionBlockThreshold, 8) + regionBlockEntropyThreshold := new(big.Int).Add(zoneThresholdS, big.NewInt(int64(regionBlockEntropyThresholdAdder))) + + totalDeltaS = new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) + if intrinsicS.Cmp(regionBlockEntropyThreshold) > 0 && totalDeltaS.Cmp(regionEntropyThreshold) > 0 { + return intrinsicS, common.REGION_CTX, nil + } + + // Zone case + return intrinsicS, common.ZONE_CTX, nil +} + +// IntrinsicLogS returns the logarithm of the intrinsic entropy reduction of a PoW hash +func (blake3pow *Blake3pow) IntrinsicLogS(powHash common.Hash) *big.Int { + x := new(big.Int).SetBytes(powHash.Bytes()) + d := new(big.Int).Div(big2e256, x) + c, m := mathutil.BinaryLog(d, mantBits) + bigBits := new(big.Int).Mul(big.NewInt(int64(c)), new(big.Int).Exp(big.NewInt(2), big.NewInt(mantBits), nil)) + bigBits = new(big.Int).Add(bigBits, m) + return bigBits +} + +// TotalLogS() returns the total entropy reduction if the chain since genesis to the given header +func (blake3pow *Blake3pow) 
TotalLogS(header *types.Header) *big.Int { + intrinsicS, order, err := blake3pow.CalcOrder(header) + if err != nil { + return big.NewInt(0) + } + switch order { + case common.PRIME_CTX: + totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX)) + totalS.Add(totalS, header.ParentDeltaS(common.ZONE_CTX)) + totalS.Add(totalS, intrinsicS) + return totalS + case common.REGION_CTX: + totalS := new(big.Int).Add(header.ParentEntropy(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) + totalS.Add(totalS, intrinsicS) + return totalS + case common.ZONE_CTX: + totalS := new(big.Int).Add(header.ParentEntropy(common.ZONE_CTX), intrinsicS) + return totalS + } + return big.NewInt(0) +} + +func (blake3pow *Blake3pow) TotalLogPhS(header *types.Header) *big.Int { + switch common.NodeLocation.Context() { + case common.PRIME_CTX: + totalS := header.ParentEntropy(common.PRIME_CTX) + return totalS + case common.REGION_CTX: + totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX)) + return totalS + case common.ZONE_CTX: + totalS := new(big.Int).Add(header.ParentEntropy(common.PRIME_CTX), header.ParentDeltaS(common.REGION_CTX)) + totalS.Add(totalS, header.ParentDeltaS(common.ZONE_CTX)) + return totalS + } + return big.NewInt(0) +} + +func (blake3pow *Blake3pow) DeltaLogS(header *types.Header) *big.Int { + intrinsicS, order, err := blake3pow.CalcOrder(header) + if err != nil { + return big.NewInt(0) + } + switch order { + case common.PRIME_CTX: + return big.NewInt(0) + case common.REGION_CTX: + totalDeltaS := new(big.Int).Add(header.ParentDeltaS(common.REGION_CTX), header.ParentDeltaS(common.ZONE_CTX)) + totalDeltaS = new(big.Int).Add(totalDeltaS, intrinsicS) + return totalDeltaS + case common.ZONE_CTX: + totalDeltaS := new(big.Int).Add(header.ParentDeltaS(common.ZONE_CTX), intrinsicS) + return totalDeltaS + } + return big.NewInt(0) +} diff --git a/consensus/blake3pow/sealer.go 
b/consensus/blake3pow/sealer.go new file mode 100644 index 0000000000..9a15cbb8ab --- /dev/null +++ b/consensus/blake3pow/sealer.go @@ -0,0 +1,423 @@ +package blake3pow + +import ( + "bytes" + "context" + crand "crypto/rand" + "encoding/json" + "errors" + "math" + "math/big" + "math/rand" + "net/http" + "runtime" + "sync" + "time" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/common/hexutil" + "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/log" +) + +const ( + // staleThreshold is the maximum depth of the acceptable stale but valid blake3pow solution. + staleThreshold = 7 + mantBits = 64 +) + +var ( + errNoMiningWork = errors.New("no mining work available yet") + errInvalidSealResult = errors.New("invalid or stale proof-of-work solution") +) + +// Seal implements consensus.Engine, attempting to find a nonce that satisfies +// the header's difficulty requirements. +func (blake3pow *Blake3pow) Seal(header *types.Header, results chan<- *types.Header, stop <-chan struct{}) error { + // If we're running a fake PoW, simply return a 0 nonce immediately + if blake3pow.config.PowMode == ModeFake || blake3pow.config.PowMode == ModeFullFake { + header.SetNonce(types.BlockNonce{}) + select { + case results <- header: + default: + blake3pow.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", header.SealHash()) + } + return nil + } + // If we're running a shared PoW, delegate sealing to it + if blake3pow.shared != nil { + return blake3pow.shared.Seal(header, results, stop) + } + // Create a runner and the multiple search threads it directs + abort := make(chan struct{}) + + blake3pow.lock.Lock() + threads := blake3pow.threads + if blake3pow.rand == nil { + seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + blake3pow.lock.Unlock() + return err + } + blake3pow.rand = rand.New(rand.NewSource(seed.Int64())) + } + 
blake3pow.lock.Unlock() + if threads == 0 { + threads = runtime.NumCPU() + } + if threads < 0 { + threads = 0 // Allows disabling local mining without extra logic around local/remote + } + // Push new work to remote sealer + if blake3pow.remote != nil { + blake3pow.remote.workCh <- &sealTask{header: header, results: results} + } + var ( + pend sync.WaitGroup + locals = make(chan *types.Header) + ) + for i := 0; i < threads; i++ { + pend.Add(1) + go func(id int, nonce uint64) { + defer pend.Done() + blake3pow.mine(header, id, nonce, abort, locals) + }(i, uint64(blake3pow.rand.Int63())) + } + // Wait until sealing is terminated or a nonce is found + go func() { + var result *types.Header + select { + case <-stop: + // Outside abort, stop all miner threads + close(abort) + case result = <-locals: + // One of the threads found a block, abort all others + select { + case results <- result: + default: + blake3pow.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", header.SealHash()) + } + close(abort) + case <-blake3pow.update: + // Thread count was changed on user request, restart + close(abort) + if err := blake3pow.Seal(header, results, stop); err != nil { + blake3pow.config.Log.Error("Failed to restart sealing after update", "err", err) + } + } + // Wait for all miners to terminate and return the block + pend.Wait() + }() + return nil +} + +// mine is the actual proof-of-work miner that searches for a nonce starting from +// seed that results in correct final header difficulty. 
+func (blake3pow *Blake3pow) mine(header *types.Header, id int, seed uint64, abort chan struct{}, found chan *types.Header) { + // Extract some data from the header + var ( + target = new(big.Int).Div(big2e256, header.Difficulty()) + ) + // Start generating random nonces until we abort or find a good one + var ( + attempts = int64(0) + nonce = seed + powBuffer = new(big.Int) + ) + logger := log.Log + logger.Trace("Started blake3pow search for new nonces", "seed", seed) +search: + for { + select { + case <-abort: + // Mining terminated, update stats and abort + logger.Trace("Blake3pow nonce search aborted", "attempts", nonce-seed) + blake3pow.hashrate.Mark(attempts) + break search + + default: + // We don't have to update hash rate on every nonce, so update after after 2^X nonces + attempts++ + if (attempts % (1 << 15)) == 0 { + blake3pow.hashrate.Mark(attempts) + attempts = 0 + } + // Compute the PoW value of this nonce + header = types.CopyHeader(header) + header.SetNonce(types.EncodeNonce(nonce)) + hash := header.Hash().Bytes() + if powBuffer.SetBytes(hash).Cmp(target) <= 0 { + // Correct nonce found, create a new header with it + + // Seal and return a block (if still needed) + select { + case found <- header: + logger.Trace("Blake3pow nonce found and reported", "attempts", nonce-seed, "nonce", nonce) + case <-abort: + logger.Trace("Blake3pow nonce found but discarded", "attempts", nonce-seed, "nonce", nonce) + } + break search + } + nonce++ + } + } +} + +// This is the timeout for HTTP requests to notify external miners. 
+const remoteSealerTimeout = 1 * time.Second + +type remoteSealer struct { + works map[common.Hash]*types.Header + rates map[common.Hash]hashrate + currentHeader *types.Header + currentWork [4]string + notifyCtx context.Context + cancelNotify context.CancelFunc // cancels all notification requests + reqWG sync.WaitGroup // tracks notification request goroutines + + blake3pow *Blake3pow + noverify bool + notifyURLs []string + results chan<- *types.Header + workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer + fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work + submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result + fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer. + submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate + requestExit chan struct{} + exitCh chan struct{} +} + +// sealTask wraps a seal header with relative result channel for remote sealer thread. +type sealTask struct { + header *types.Header + results chan<- *types.Header +} + +// mineResult wraps the pow solution parameters for the specified block. +type mineResult struct { + nonce types.BlockNonce + hash common.Hash + + errc chan error +} + +// hashrate wraps the hash rate submitted by the remote sealer. +type hashrate struct { + id common.Hash + ping time.Time + rate uint64 + + done chan struct{} +} + +// sealWork wraps a seal work package for remote sealer. 
+type sealWork struct { + errc chan error + res chan [4]string +} + +func startRemoteSealer(blake3pow *Blake3pow, urls []string, noverify bool) *remoteSealer { + ctx, cancel := context.WithCancel(context.Background()) + s := &remoteSealer{ + blake3pow: blake3pow, + noverify: noverify, + notifyURLs: urls, + notifyCtx: ctx, + cancelNotify: cancel, + works: make(map[common.Hash]*types.Header), + rates: make(map[common.Hash]hashrate), + workCh: make(chan *sealTask), + fetchWorkCh: make(chan *sealWork), + submitWorkCh: make(chan *mineResult), + fetchRateCh: make(chan chan uint64), + submitRateCh: make(chan *hashrate), + requestExit: make(chan struct{}), + exitCh: make(chan struct{}), + } + go s.loop() + return s +} + +func (s *remoteSealer) loop() { + defer func() { + s.blake3pow.config.Log.Trace("Blake3pow remote sealer is exiting") + s.cancelNotify() + s.reqWG.Wait() + close(s.exitCh) + }() + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + for { + select { + case work := <-s.workCh: + // Update current work with new received header. + // Note same work can be past twice, happens when changing CPU threads. + s.results = work.results + s.makeWork(work.header) + s.notifyWork() + + case work := <-s.fetchWorkCh: + // Return current mining work to remote miner. + if s.currentHeader == nil { + work.errc <- errNoMiningWork + } else { + work.res <- s.currentWork + } + + case result := <-s.submitWorkCh: + // Verify submitted PoW solution based on maintained mining blocks. + if s.submitWork(result.nonce, result.hash) { + result.errc <- nil + } else { + result.errc <- errInvalidSealResult + } + + case result := <-s.submitRateCh: + // Trace remote sealer's hash rate by submitted value. + s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()} + close(result.done) + + case req := <-s.fetchRateCh: + // Gather all hash rate submitted by remote sealer. 
+			var total uint64
+			for _, rate := range s.rates {
+				// this could overflow
+				total += rate.rate
+			}
+			req <- total
+
+		case <-ticker.C:
+			// Clear stale submitted hash rate.
+			for id, rate := range s.rates {
+				if time.Since(rate.ping) > 10*time.Second {
+					delete(s.rates, id)
+				}
+			}
+			// Clear stale pending blocks
+			if s.currentHeader != nil {
+				for hash, header := range s.works {
+					if header.NumberU64()+staleThreshold <= s.currentHeader.NumberU64() {
+						delete(s.works, hash)
+					}
+				}
+			}
+
+		case <-s.requestExit:
+			return
+		}
+	}
+}
+
+// makeWork creates a work package for external miner.
+//
+// The work package consists of 3 strings:
+//
+//	result[0], 32 bytes hex encoded current header pow-hash
+//	result[1], hex encoded header number
+//	result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+//	result[3], unused — blake3pow has no DAG, so no seed hash is published
+func (s *remoteSealer) makeWork(header *types.Header) {
+	hash := header.SealHash()
+	s.currentWork[0] = hash.Hex()
+	s.currentWork[1] = hexutil.EncodeBig(header.Number())
+	s.currentWork[2] = common.BytesToHash(new(big.Int).Div(big2e256, header.Difficulty()).Bytes()).Hex()
+
+	// Trace the seal work fetched by remote sealer.
+	s.currentHeader = header
+	s.works[hash] = header
+}
+
+// notifyWork notifies all the specified mining endpoints of the availability of
+// new work to be processed.
+ var blob []byte + if s.blake3pow.config.NotifyFull { + blob, _ = json.Marshal(s.currentHeader) + } else { + blob, _ = json.Marshal(work) + } + + s.reqWG.Add(len(s.notifyURLs)) + for _, url := range s.notifyURLs { + go s.sendNotification(s.notifyCtx, url, blob, work) + } +} + +func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) { + defer s.reqWG.Done() + + req, err := http.NewRequest("POST", url, bytes.NewReader(json)) + if err != nil { + s.blake3pow.config.Log.Warn("Can't create remote miner notification", "err", err) + return + } + ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout) + defer cancel() + req = req.WithContext(ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + s.blake3pow.config.Log.Warn("Failed to notify remote miner", "err", err) + } else { + s.blake3pow.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2]) + resp.Body.Close() + } +} + +// submitWork verifies the submitted pow solution, returning +// whether the solution was accepted or not (not can be both a bad pow as well as +// any other error, like no pending work or stale mining result). +func (s *remoteSealer) submitWork(nonce types.BlockNonce, sealhash common.Hash) bool { + if s.currentHeader == nil { + s.blake3pow.config.Log.Error("Pending work without block", "sealhash", sealhash) + return false + } + // Make sure the work submitted is present + header := s.works[sealhash] + if header == nil { + s.blake3pow.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentHeader.NumberU64()) + return false + } + // Verify the correctness of submitted result. 
+ header.SetNonce(nonce) + + start := time.Now() + if !s.noverify { + if err := s.blake3pow.verifySeal(nil, header, true); err != nil { + s.blake3pow.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err) + return false + } + } + // Make sure the result channel is assigned. + if s.results == nil { + s.blake3pow.config.Log.Warn("Blake3pow result channel is empty, submitted mining result is rejected") + return false + } + s.blake3pow.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start))) + + // Solutions seems to be valid, return to the miner and notify acceptance. + solution := header + + // The submitted solution is within the scope of acceptance. + if solution.NumberU64()+staleThreshold > s.currentHeader.NumberU64() { + select { + case s.results <- solution: + s.blake3pow.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return true + default: + s.blake3pow.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash) + return false + } + } + // The submitted block is too old to accept, drop it. 
+ s.blake3pow.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash()) + return false +} diff --git a/consensus/blake3pow/sealer_test.go b/consensus/blake3pow/sealer_test.go new file mode 100644 index 0000000000..b5d36130d5 --- /dev/null +++ b/consensus/blake3pow/sealer_test.go @@ -0,0 +1,279 @@ +package blake3pow + +import ( + "encoding/json" + "io/ioutil" + "math/big" + "net/http" + "net/http/httptest" + "strconv" + "testing" + "time" + + "github.com/dominant-strategies/go-quai/common" + "github.com/dominant-strategies/go-quai/core/types" + "github.com/dominant-strategies/go-quai/internal/testlog" + "github.com/dominant-strategies/go-quai/log" +) + +// Tests whether remote HTTP servers are correctly notified of new work. +func TestRemoteNotify(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan [3]string) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work [3]string + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom blake3pow engine. + blake3pow := NewTester([]string{server.URL}, false) + defer blake3pow.Close() + + // Stream a work task and ensure the notification bubbles out. 
+	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+	block := types.NewBlockWithHeader(header)
+
+	blake3pow.Seal(nil, block, nil, nil)
+	select {
+	case work := <-sink:
+		if want := blake3pow.SealHash(header).Hex(); work[0] != want {
+			t.Errorf("work packet hash mismatch: have %s, want %s", work[0], want)
+		}
+		// makeWork publishes the hex encoded header number in work[1]
+		// (blake3pow has no DAG, hence no seed hash).
+		if want := "0x" + header.Number().Text(16); work[1] != want {
+			t.Errorf("work packet number mismatch: have %s, want %s", work[1], want)
+		}
+		target := new(big.Int).Div(new(big.Int).Lsh(big.NewInt(1), 256), header.Difficulty())
+		if want := common.BytesToHash(target.Bytes()).Hex(); work[2] != want {
+			t.Errorf("work packet target mismatch: have %s, want %s", work[2], want)
+		}
+	case <-time.After(3 * time.Second):
+		t.Fatalf("notification timed out")
+	}
+}
+
+// Tests whether remote HTTP servers are correctly notified of new work. (Full pending block body / --miner.notify.full)
+func TestRemoteNotifyFull(t *testing.T) {
+	// Start a simple web server to capture notifications.
+	sink := make(chan map[string]interface{})
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		blob, err := ioutil.ReadAll(req.Body)
+		if err != nil {
+			t.Errorf("failed to read miner notification: %v", err)
+		}
+		var work map[string]interface{}
+		if err := json.Unmarshal(blob, &work); err != nil {
+			t.Errorf("failed to unmarshal miner notification: %v", err)
+		}
+		sink <- work
+	}))
+	defer server.Close()
+
+	// Create the custom blake3pow engine.
+	config := Config{
+		PowMode:    ModeTest,
+		NotifyFull: true,
+		Log:        testlog.Logger(t, log.LvlWarn),
+	}
+	blake3pow := New(config, []string{server.URL}, false)
+	defer blake3pow.Close()
+
+	// Stream a work task and ensure the notification bubbles out.
+ header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + + blake3pow.Seal(nil, block, nil, nil) + select { + case work := <-sink: + if want := "0x" + strconv.FormatUint(header.Number().Uint64(), 16); work["number"] != want { + t.Errorf("pending block number mismatch: have %v, want %v", work["number"], want) + } + if want := "0x" + header.Difficulty().Text(16); work["difficulty"] != want { + t.Errorf("pending block difficulty mismatch: have %s, want %s", work["difficulty"], want) + } + case <-time.After(3 * time.Second): + t.Fatalf("notification timed out") + } +} + +// Tests that pushing work packages fast to the miner doesn't cause any data race +// issues in the notifications. +func TestRemoteMultiNotify(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan [3]string, 64) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work [3]string + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom blake3pow engine. + blake3pow := NewTester([]string{server.URL}, false) + blake3pow.config.Log = testlog.Logger(t, log.LvlWarn) + defer blake3pow.Close() + + // Provide a results reader. + // Otherwise the unread results will be logged asynchronously + // and this can happen after the test is finished, causing a panic. + results := make(chan *types.Block, cap(sink)) + + // Stream a lot of work task and ensure all the notifications bubble out. 
+ for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + blake3pow.Seal(nil, block, results, nil) + } + + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + <-results + case <-time.After(10 * time.Second): + t.Fatalf("notification %d timed out", i) + } + } +} + +// Tests that pushing work packages fast to the miner doesn't cause any data race +// issues in the notifications. Full pending block body / --miner.notify.full) +func TestRemoteMultiNotifyFull(t *testing.T) { + // Start a simple web server to capture notifications. + sink := make(chan map[string]interface{}, 64) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + blob, err := ioutil.ReadAll(req.Body) + if err != nil { + t.Errorf("failed to read miner notification: %v", err) + } + var work map[string]interface{} + if err := json.Unmarshal(blob, &work); err != nil { + t.Errorf("failed to unmarshal miner notification: %v", err) + } + sink <- work + })) + defer server.Close() + + // Create the custom blake3pow engine. + config := Config{ + PowMode: ModeTest, + NotifyFull: true, + Log: testlog.Logger(t, log.LvlWarn), + } + blake3pow := New(config, []string{server.URL}, false) + defer blake3pow.Close() + + // Provide a results reader. + // Otherwise the unread results will be logged asynchronously + // and this can happen after the test is finished, causing a panic. + results := make(chan *types.Block, cap(sink)) + + // Stream a lot of work task and ensure all the notifications bubble out. 
+ for i := 0; i < cap(sink); i++ { + header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)} + block := types.NewBlockWithHeader(header) + blake3pow.Seal(nil, block, results, nil) + } + + for i := 0; i < cap(sink); i++ { + select { + case <-sink: + <-results + case <-time.After(10 * time.Second): + t.Fatalf("notification %d timed out", i) + } + } +} + +// Tests whether stale solutions are correctly processed. +func TestStaleSubmission(t *testing.T) { + blake3pow := NewTester(nil, true) + defer blake3pow.Close() + api := &API{blake3pow} + + fakeNonce, fakeDigest := types.BlockNonce{0x01, 0x02, 0x03}, common.HexToHash("deadbeef") + + testcases := []struct { + headers []*types.Header + submitIndex int + submitRes bool + }{ + // Case1: submit solution for the latest mining package + { + []*types.Header{ + {ParentHash: common.BytesToHash([]byte{0xa}), Number: big.NewInt(1), Difficulty: big.NewInt(100000000)}, + }, + 0, + true, + }, + // Case2: submit solution for the previous package but have same parent. 
+ { + []*types.Header{ + {ParentHash: common.BytesToHash([]byte{0xb}), Number: big.NewInt(2), Difficulty: big.NewInt(100000000)}, + {ParentHash: common.BytesToHash([]byte{0xb}), Number: big.NewInt(2), Difficulty: big.NewInt(100000001)}, + }, + 0, + true, + }, + // Case3: submit stale but acceptable solution + { + []*types.Header{ + {ParentHash: common.BytesToHash([]byte{0xc}), Number: big.NewInt(3), Difficulty: big.NewInt(100000000)}, + {ParentHash: common.BytesToHash([]byte{0xd}), Number: big.NewInt(9), Difficulty: big.NewInt(100000000)}, + }, + 0, + true, + }, + // Case4: submit very old solution + { + []*types.Header{ + {ParentHash: common.BytesToHash([]byte{0xe}), Number: big.NewInt(10), Difficulty: big.NewInt(100000000)}, + {ParentHash: common.BytesToHash([]byte{0xf}), Number: big.NewInt(17), Difficulty: big.NewInt(100000000)}, + }, + 0, + false, + }, + } + results := make(chan *types.Block, 16) + + for id, c := range testcases { + for _, h := range c.headers { + blake3pow.Seal(nil, types.NewBlockWithHeader(h), results, nil) + } + if res := api.SubmitWork(fakeNonce, blake3pow.SealHash(c.headers[c.submitIndex]), fakeDigest); res != c.submitRes { + t.Errorf("case %d submit result mismatch, want %t, get %t", id+1, c.submitRes, res) + } + if !c.submitRes { + continue + } + select { + case res := <-results: + if res.Header().Nonce() != fakeNonce { + t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce()) + } + if res.Header().Difficulty().Uint64() != c.headers[c.submitIndex].Difficulty().Uint64() { + t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty(), res.Header().Difficulty()) + } + if res.Header().Number().Uint64() != c.headers[c.submitIndex].Number().Uint64() { + t.Errorf("case %d block number mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Number().Uint64(), res.Header().Number().Uint64()) + } + if res.Header().ParentHash() != 
c.headers[c.submitIndex].ParentHash() { + t.Errorf("case %d block parent hash mismatch, want %s, get %s", id+1, c.headers[c.submitIndex].ParentHash().Hex(), res.Header().ParentHash().Hex()) + } + case <-time.NewTimer(time.Second).C: + t.Errorf("case %d fetch blake3pow result timeout", id+1) + } + } +} diff --git a/core/genesis.go b/core/genesis.go index f5fce51c82..145950d15e 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -213,7 +213,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis) (*params // Special case: don't change the existing config of a non-mainnet chain if no new // config is supplied. These chains would get AllProtocolChanges (and a compat error) // if we just continued here. - if genesis == nil && stored != params.ColosseumGenesisHash { + if genesis == nil && stored != params.ProgpowColosseumGenesisHash { return storedcfg, stored, nil } // Check config compatibility and write the config. Compatibility errors @@ -231,16 +231,28 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { switch { case g != nil: return g.Config - case ghash == params.ColosseumGenesisHash: - return params.ColosseumChainConfig - case ghash == params.GardenGenesisHash: - return params.GardenChainConfig - case ghash == params.OrchardGenesisHash: - return params.OrchardChainConfig - case ghash == params.GalenaGenesisHash: - return params.GalenaChainConfig - case ghash == params.LocalGenesisHash: - return params.LocalChainConfig + case ghash == params.ProgpowColosseumGenesisHash: + return params.ProgpowColosseumChainConfig + case ghash == params.ProgpowGardenGenesisHash: + return params.ProgpowGardenChainConfig + case ghash == params.ProgpowOrchardGenesisHash: + return params.ProgpowOrchardChainConfig + case ghash == params.ProgpowGalenaGenesisHash: + return params.ProgpowGalenaChainConfig + case ghash == params.ProgpowLocalGenesisHash: + return params.ProgpowLocalChainConfig + // Blake3 chain configs + case ghash == 
params.Blake3PowColosseumGenesisHash: + return params.Blake3PowColosseumChainConfig + case ghash == params.Blake3PowGardenGenesisHash: + return params.Blake3PowGardenChainConfig + case ghash == params.Blake3PowOrchardGenesisHash: + return params.Blake3PowOrchardChainConfig + case ghash == params.Blake3PowGalenaGenesisHash: + return params.Blake3PowGalenaChainConfig + case ghash == params.Blake3PowLocalGenesisHash: + return params.Blake3PowLocalChainConfig + default: return params.AllProgpowProtocolChanges } @@ -311,13 +323,22 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big // DefaultGenesisBlock returns the Latest default Genesis block. // Currently it returns the DefaultColosseumGenesisBlock. func DefaultGenesisBlock() *Genesis { - return DefaultColosseumGenesisBlock() + return DefaultColosseumGenesisBlock("progpow") } // DefaultColosseumGenesisBlock returns the Quai Colosseum testnet genesis block. -func DefaultColosseumGenesisBlock() *Genesis { +func DefaultColosseumGenesisBlock(consensusEngine string) *Genesis { + if consensusEngine == "blake3" { + return &Genesis{ + Config: params.Blake3PowColosseumChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fb"), + GasLimit: 5000000, + Difficulty: big.NewInt(2000000), + } + } return &Genesis{ - Config: params.ColosseumChainConfig, + Config: params.ProgpowColosseumChainConfig, Nonce: 66, ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fb"), GasLimit: 5000000, @@ -326,9 +347,18 @@ func DefaultColosseumGenesisBlock() *Genesis { } // DefaultGardenGenesisBlock returns the Garden testnet genesis block. 
-func DefaultGardenGenesisBlock() *Genesis { +func DefaultGardenGenesisBlock(consensusEngine string) *Genesis { + if consensusEngine == "blake3" { + return &Genesis{ + Config: params.Blake3PowGardenChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fb"), + GasLimit: 5000000, + Difficulty: big.NewInt(4000000), + } + } return &Genesis{ - Config: params.GardenChainConfig, + Config: params.ProgpowGardenChainConfig, Nonce: 0, ExtraData: hexutil.MustDecode("0x3535353535353535353535353535353535353535353535353535353535353539"), GasLimit: 5000000, @@ -337,9 +367,18 @@ func DefaultGardenGenesisBlock() *Genesis { } // DefaultOrchardGenesisBlock returns the Orchard testnet genesis block. -func DefaultOrchardGenesisBlock() *Genesis { +func DefaultOrchardGenesisBlock(consensusEngine string) *Genesis { + if consensusEngine == "blake3" { + return &Genesis{ + Config: params.Blake3PowOrchardChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fc"), + GasLimit: 5000000, + Difficulty: big.NewInt(4000000), + } + } return &Genesis{ - Config: params.OrchardChainConfig, + Config: params.ProgpowOrchardChainConfig, Nonce: 0, ExtraData: hexutil.MustDecode("0x3535353535353535353535353535353535353535353535353535353535353536"), GasLimit: 5000000, @@ -348,9 +387,18 @@ func DefaultOrchardGenesisBlock() *Genesis { } // DefaultGalenaGenesisBlock returns the Galena testnet genesis block. 
-func DefaultGalenaGenesisBlock() *Genesis { +func DefaultGalenaGenesisBlock(consensusEngine string) *Genesis { + if consensusEngine == "blake3" { + return &Genesis{ + Config: params.Blake3PowGalenaChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fb"), + GasLimit: 5000000, + Difficulty: big.NewInt(4000000), + } + } return &Genesis{ - Config: params.GalenaChainConfig, + Config: params.ProgpowGalenaChainConfig, Nonce: 0, ExtraData: hexutil.MustDecode("0x3535353535353535353535353535353535353535353535353535353535353537"), GasLimit: 5000000, @@ -359,9 +407,18 @@ func DefaultGalenaGenesisBlock() *Genesis { } // DefaultLocalGenesisBlock returns the Local testnet genesis block. -func DefaultLocalGenesisBlock() *Genesis { +func DefaultLocalGenesisBlock(consensusEngine string) *Genesis { + if consensusEngine == "blake3" { + return &Genesis{ + Config: params.Blake3PowLocalChainConfig, + Nonce: 66, + ExtraData: hexutil.MustDecode("0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fb"), + GasLimit: 5000000, + Difficulty: big.NewInt(300000), + } + } return &Genesis{ - Config: params.LocalChainConfig, + Config: params.ProgpowLocalChainConfig, Nonce: 0, ExtraData: hexutil.MustDecode("0x3535353535353535353535353535353535353535353535353535353535353535"), GasLimit: 5000000, diff --git a/core/types/block.go b/core/types/block.go index 07309a0549..1fcc47883f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -158,6 +158,7 @@ func EmptyHeader() *Header { h.number = make([]*big.Int, common.HierarchyDepth) h.difficulty = big.NewInt(0) h.root = EmptyRootHash + h.mixHash = EmptyRootHash h.txHash = EmptyRootHash h.etxHash = EmptyRootHash h.etxRollupHash = EmptyRootHash diff --git a/eth/backend.go b/eth/backend.go index e20bb8fafb..6e917ed410 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -109,10 +109,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Quai, error) { } 
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024) - // Transfer mining-related config to the progpow config. - progpowConfig := config.Progpow - progpowConfig.NotifyFull = config.Miner.NotifyFull - // Assemble the Quai object chainDb, err := stack.OpenDatabaseWithFreezer("chaindata", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, "eth/db/chaindata/", false) if err != nil { @@ -122,7 +118,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Quai, error) { if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } - log.Info("Initialised chain configuration", "config", chainConfig) if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil { log.Error("Failed to recover state", "error", err) @@ -131,7 +126,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Quai, error) { config: config, chainDb: chainDb, eventMux: stack.EventMux(), - engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &progpowConfig, config.Miner.Notify, config.Miner.Noverify, chainDb), closeBloomHandler: make(chan struct{}), networkID: config.NetworkId, gasPrice: config.Miner.GasPrice, @@ -141,6 +135,18 @@ func New(stack *node.Node, config *ethconfig.Config) (*Quai, error) { p2pServer: stack.Server(), } + if config.ConsensusEngine == "blake3" { + blake3Config := config.Blake3Pow + blake3Config.NotifyFull = config.Miner.NotifyFull + eth.engine = ethconfig.CreateBlake3ConsensusEngine(stack, chainConfig, &blake3Config, config.Miner.Notify, config.Miner.Noverify, chainDb) + } else { + // Transfer mining-related config to the progpow config. 
+ progpowConfig := config.Progpow + progpowConfig.NotifyFull = config.Miner.NotifyFull + eth.engine = ethconfig.CreateProgpowConsensusEngine(stack, chainConfig, &progpowConfig, config.Miner.Notify, config.Miner.Noverify, chainDb) + } + log.Info("Initialised chain configuration", "config", chainConfig) + bcVersion := rawdb.ReadDatabaseVersion(chainDb) var dbVer = "" if bcVersion != nil { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 064de35002..13afbbf112 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -23,6 +23,7 @@ import ( "github.com/dominant-strategies/go-quai/common" "github.com/dominant-strategies/go-quai/consensus" + "github.com/dominant-strategies/go-quai/consensus/blake3pow" "github.com/dominant-strategies/go-quai/consensus/progpow" "github.com/dominant-strategies/go-quai/core" "github.com/dominant-strategies/go-quai/eth/downloader" @@ -121,9 +122,15 @@ type Config struct { // Mining options Miner core.Config + // Consensus Engine + ConsensusEngine string + // Progpow options Progpow progpow.Config + // Blake3 options + Blake3Pow blake3pow.Config + // Transaction pool options TxPool core.TxPoolConfig @@ -159,8 +166,8 @@ type Config struct { SlicesRunning []common.Location } -// CreateConsensusEngine creates a consensus engine for the given chain configuration. -func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *progpow.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { +// CreateProgpowConsensusEngine creates a progpow consensus engine for the given chain configuration. 
+func CreateProgpowConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *progpow.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
 	// Otherwise assume proof-of-work
 	switch config.PowMode {
 	case progpow.ModeFake:
@@ -178,3 +185,23 @@ func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, co
 	engine.SetThreads(-1) // Disable CPU mining
 	return engine
 }
+
+// CreateBlake3ConsensusEngine creates a blake3pow consensus engine for the given chain configuration.
+func CreateBlake3ConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *blake3pow.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {
+	// Otherwise assume proof-of-work
+	switch config.PowMode {
+	case blake3pow.ModeFake:
+		log.Warn("Blake3pow used in fake mode")
+	case blake3pow.ModeTest:
+		log.Warn("Blake3pow used in test mode")
+	case blake3pow.ModeShared:
+		log.Warn("Blake3pow used in shared mode")
+	}
+	engine := blake3pow.New(blake3pow.Config{
+		PowMode:       config.PowMode,
+		NotifyFull:    config.NotifyFull,
+		DurationLimit: config.DurationLimit,
+	}, notify, noverify)
+	engine.SetThreads(-1) // Disable CPU mining
+	return engine
+}
diff --git a/network.env.dist b/network.env.dist
index 6f22436205..b8d279b8bd 100644
--- a/network.env.dist
+++ b/network.env.dist
@@ -125,3 +125,4 @@ ENABLE_PPROF=false
 
 # Output format variables
 SHOW_COLORS=true
+RUN_BLAKE3=false
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 40e0828b2f..ed520a1049 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -38,9 +38,9 @@ var GardenBootnodes = []string{
 // OrchardBootnodes are the enode URLs of the P2P bootstrap nodes running on the
 // Orchard test network
 var OrchardBootnodes = []string{
-	"enode://f99403abcfbee37e3232e6bb4d7fda4d70496585a53857ccb4aff5ec32d0f27186b5097430d5806f20f2003f35cfec5d778598a3945d530f212b7072caab9b8a@35.188.17.207", // us-central1-b
+
"enode://f99403abcfbee37e3232e6bb4d7fda4d70496585a53857ccb4aff5ec32d0f27186b5097430d5806f20f2003f35cfec5d778598a3945d530f212b7072caab9b8a@35.188.17.207", // us-central1-b
 	"enode://142e48e3c36e5fe21aebf2941f2e63eb4720febe67de17dd84baf010e33c76275567ede53674007ab2848eec53022cd0cb94bcbea10960ae93edb5497c8caa2a@104.198.69.162", // us-central1-a
-	"enode://d6d27b273682f8abc7ffff04dc9006bd40f0a079a8ba24da351b714506bb82c1f106ff073fa04983345aef15c876c602209b48b37d8ee10dad581fd1d9db9263@34.23.150.43", // us-east1-c
+	"enode://d6d27b273682f8abc7ffff04dc9006bd40f0a079a8ba24da351b714506bb82c1f106ff073fa04983345aef15c876c602209b48b37d8ee10dad581fd1d9db9263@34.23.150.43", // us-east1-c
 }
 
 // GalenaBootnodes are the enode URLs of the P2P bootstrap nodes running on the
@@ -60,13 +60,21 @@ const dnsPrefix = "enrtree://ALE24Z2TEZV2XK46RXVB6IIN5HB5WTI4F4SMAVLYCAQIUPU53RS
 func KnownDNSNetwork(genesis common.Hash, protocol string) string {
 	var net string
 	switch genesis {
-	case ColosseumGenesisHash:
+	case ProgpowColosseumGenesisHash:
 		net = "colosseum"
-	case GardenGenesisHash:
+	case ProgpowGardenGenesisHash:
 		net = "garden"
-	case OrchardGenesisHash:
+	case ProgpowOrchardGenesisHash:
 		net = "orchard"
-	case GalenaGenesisHash:
+	case ProgpowGalenaGenesisHash:
+		net = "galena"
+	case Blake3PowColosseumGenesisHash:
+		net = "colosseum"
+	case Blake3PowGardenGenesisHash:
+		net = "garden"
+	case Blake3PowOrchardGenesisHash:
+		net = "orchard"
+	case Blake3PowGalenaGenesisHash:
 		net = "galena"
 	default:
 		return ""
diff --git a/params/config.go b/params/config.go
index e95980ae9c..4f9c1f2ffa 100644
--- a/params/config.go
+++ b/params/config.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -25,47 +25,86 @@ import (
 // Genesis hashes to enforce below configs on.
var (
-	ColosseumGenesisHash = common.HexToHash("0x937e935af7be23cee3a138dcc0e1a0ba1ccf2c9d085d144d4fb2e6fbd54fdd67")
-	GardenGenesisHash    = common.HexToHash("0x8975f8760d317524559986d595058d4d66c05d62e0eda1d740cdee56a25788a5")
-	OrchardGenesisHash   = common.HexToHash("0x5627aca8194b46ed071f92746ecf975542b12ce406905d715b4be8f044749956")
-	LocalGenesisHash     = common.HexToHash("0x9e7149a4f5ff07675e0e7881f004cd0dc1f5a28bb9ba8c4de86794ba6fe80b60")
-	GalenaGenesisHash    = common.HexToHash("0xe0a395a3fcd7ecbb28dd66eeceab2fb40db01a2bfbf9e5fbc5b93269104df19a")
+	// Progpow GenesisHashes
+	ProgpowColosseumGenesisHash = common.HexToHash("0x937e935af7be23cee3a138dcc0e1a0ba1ccf2c9d085d144d4fb2e6fbd54fdd67")
+	ProgpowGardenGenesisHash    = common.HexToHash("0x8975f8760d317524559986d595058d4d66c05d62e0eda1d740cdee56a25788a5")
+	ProgpowOrchardGenesisHash   = common.HexToHash("0x5627aca8194b46ed071f92746ecf975542b12ce406905d715b4be8f044749956")
+	ProgpowLocalGenesisHash     = common.HexToHash("0x9e7149a4f5ff07675e0e7881f004cd0dc1f5a28bb9ba8c4de86794ba6fe80b60")
+	ProgpowGalenaGenesisHash    = common.HexToHash("0xe0a395a3fcd7ecbb28dd66eeceab2fb40db01a2bfbf9e5fbc5b93269104df19a")
+
+	// Blake3GenesisHashes
+	Blake3PowColosseumGenesisHash = common.HexToHash("0x5746089cbee3cde719c3d0599e31504793028c68e5df7acff956e917f72866f5")
+	Blake3PowGardenGenesisHash    = common.HexToHash("0xdee75a7b24237d07f15392d7d5319a9421f838d84b9a6e6a8d1a4d74365ff2de")
+	Blake3PowOrchardGenesisHash   = common.HexToHash("0x418ea8cd5f17277e4bb94cba7170a494fc53df23b915ed42a8fe9f6052a4327b")
+	Blake3PowLocalGenesisHash     = common.HexToHash("0x9b747199047097790a4e376e4682ec682f7575d8fb07021e0e9d95e25dcdc140")
+	Blake3PowGalenaGenesisHash    = common.HexToHash("0xdee75a7b24237d07f15392d7d5319a9421f838d84b9a6e6a8d1a4d74365ff2de") // NOTE(review): identical to Blake3PowGardenGenesisHash — confirm this duplication is intentional
 )
 
 var (
 	// ColosseumChainConfig is the chain parameters to run a node on the Colosseum network.
-	ColosseumChainConfig = &ChainConfig{
+	ProgpowColosseumChainConfig = &ChainConfig{
 		ChainID:     big.NewInt(9000),
 		Progpow:     new(ProgpowConfig),
-		GenesisHash: ColosseumGenesisHash,
+		GenesisHash: ProgpowColosseumGenesisHash,
+	}
+
+	Blake3PowColosseumChainConfig = &ChainConfig{
+		ChainID:     big.NewInt(9000),
+		Blake3Pow:   new(Blake3powConfig),
+		GenesisHash: Blake3PowColosseumGenesisHash,
 	}
 
 	// GardenChainConfig contains the chain parameters to run a node on the Garden test network.
-	GardenChainConfig = &ChainConfig{
+	ProgpowGardenChainConfig = &ChainConfig{
 		ChainID:     big.NewInt(12000),
 		Progpow:     new(ProgpowConfig),
-		GenesisHash: GardenGenesisHash,
+		GenesisHash: ProgpowGardenGenesisHash,
+	}
+
+	Blake3PowGardenChainConfig = &ChainConfig{
+		ChainID:     big.NewInt(12000),
+		Blake3Pow:   new(Blake3powConfig),
+		GenesisHash: Blake3PowGardenGenesisHash,
 	}
 
 	// OrchardChainConfig contains the chain parameters to run a node on the Orchard test network.
-	OrchardChainConfig = &ChainConfig{
+	ProgpowOrchardChainConfig = &ChainConfig{
 		ChainID:     big.NewInt(15000),
 		Progpow:     new(ProgpowConfig),
-		GenesisHash: OrchardGenesisHash,
+		GenesisHash: ProgpowOrchardGenesisHash,
+	}
+
+	Blake3PowOrchardChainConfig = &ChainConfig{
+		ChainID:     big.NewInt(15000),
+		Blake3Pow:   new(Blake3powConfig),
+		GenesisHash: Blake3PowOrchardGenesisHash,
 	}
 
 	// GalenaChainConfig contains the chain parameters to run a node on the Galena test network.
-	GalenaChainConfig = &ChainConfig{
+	ProgpowGalenaChainConfig = &ChainConfig{
 		ChainID: big.NewInt(17000),
+		// Blake3Pow must NOT be set on a progpow config: ChainConfig.String checks Blake3Pow first and would misreport the engine.
 		Progpow:     new(ProgpowConfig),
-		GenesisHash: GalenaGenesisHash,
+		GenesisHash: ProgpowGalenaGenesisHash,
+	}
+
+	Blake3PowGalenaChainConfig = &ChainConfig{
+		ChainID:     big.NewInt(17000),
+		Blake3Pow:   new(Blake3powConfig),
+		GenesisHash: Blake3PowGalenaGenesisHash,
 	}
 
 	// LocalChainConfig contains the chain parameters to run a node on the Local test network.
- LocalChainConfig = &ChainConfig{ + ProgpowLocalChainConfig = &ChainConfig{ ChainID: big.NewInt(1337), Progpow: new(ProgpowConfig), - GenesisHash: LocalGenesisHash, + GenesisHash: ProgpowLocalGenesisHash, + } + + Blake3PowLocalChainConfig = &ChainConfig{ + ChainID: big.NewInt(1337), + Blake3Pow: new(Blake3powConfig), + GenesisHash: Blake3PowLocalGenesisHash, } // AllProgpowProtocolChanges contains every protocol change introduced @@ -73,9 +112,9 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), new(ProgpowConfig), common.Hash{}} + AllProgpowProtocolChanges = &ChainConfig{big.NewInt(1337), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Hash{}} - TestChainConfig = &ChainConfig{big.NewInt(1), new(ProgpowConfig), common.Hash{}} + TestChainConfig = &ChainConfig{big.NewInt(1), "progpow", new(Blake3powConfig), new(ProgpowConfig), common.Hash{}} TestRules = TestChainConfig.Rules(new(big.Int)) ) @@ -87,8 +126,18 @@ var ( type ChainConfig struct { ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection // Various consensus engines - Progpow *ProgpowConfig `json:"progpow,omitempty"` - GenesisHash common.Hash + ConsensusEngine string + Blake3Pow *Blake3powConfig `json:"blake3pow,omitempty"` + Progpow *ProgpowConfig `json:"progpow,omitempty"` + GenesisHash common.Hash +} + +// Blake3powConfig is the consensus engine configs for proof-of-work based sealing. +type Blake3powConfig struct{} + +// String implements the stringer interface, returning the consensus engine details. +func (c *Blake3powConfig) String() string { + return "blake3pow" } // ProgpowConfig is the consensus engine configs for proof-of-work based sealing. 
@@ -103,6 +152,8 @@ func (c *ProgpowConfig) String() string { func (c *ChainConfig) String() string { var engine interface{} switch { + case c.Blake3Pow != nil: + engine = c.Blake3Pow case c.Progpow != nil: engine = c.Progpow default: