diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 3d7e96bf60..43f563159a 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// This nil assignment ensures compile time that SimulatedBackend implements bind.ContractBackend.
@@ -61,7 +61,7 @@ func NewSimulatedBackend(alloc core.GenesisAlloc) *SimulatedBackend {
database, _ := ethdb.NewMemDatabase()
genesis := core.Genesis{Config: params.AllProtocolChanges, Alloc: alloc}
genesis.MustCommit(database)
- blockchain, _ := core.NewBlockChain(database, genesis.Config, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ blockchain, _ := core.NewBlockChain(database, genesis.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
backend := &SimulatedBackend{database: database, blockchain: blockchain, config: genesis.Config}
backend.rollback()
return backend
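For reference, a minimal sketch of how the reworked constructor is typically driven from test code after this change. The throwaway key and the one-ether allocation are illustrative, not part of this diff; the `bind`, `crypto` and `core` calls are the public go-ethereum APIs of this era:

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()       // throwaway test key
	auth := bind.NewKeyedTransactor(key) // transactor for the test account

	// Pre-fund the test account in the genesis allocation.
	alloc := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1e18)}}

	// The backend's chain is now driven internally by ethash.NewFaker().
	backend := backends.NewSimulatedBackend(alloc)
	_ = backend // deploy contracts, send transactions, then backend.Commit()
}
```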
diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go
index 01a8c60ba9..a78b2a8e18 100644
--- a/cmd/evm/disasm.go
+++ b/cmd/evm/disasm.go
@@ -20,10 +20,10 @@ import (
"errors"
"fmt"
"io/ioutil"
+ "strings"
"github.com/ethereum/go-ethereum/core/asm"
cli "gopkg.in/urfave/cli.v1"
- "strings"
)
var disasmCommand = cli.Command{
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index cc6d3ac6a1..e942d53c84 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -303,7 +303,15 @@ func startNode(ctx *cli.Context, stack *node.Node) {
if err := stack.Service(&ethereum); err != nil {
utils.Fatalf("ethereum service not running: %v", err)
}
- if err := ethereum.StartMining(ctx.GlobalInt(utils.MinerThreadsFlag.Name)); err != nil {
+ if threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name); threads > 0 {
+ type threaded interface {
+ SetThreads(threads int)
+ }
+ if th, ok := ethereum.Engine().(threaded); ok {
+ th.SetThreads(threads)
+ }
+ }
+ if err := ethereum.StartMining(); err != nil {
utils.Fatalf("Failed to start mining: %v", err)
}
}
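The `threaded` assertion above is the standard Go pattern for probing an optional capability without widening the shared engine interface. A small self-contained sketch of the same pattern, with illustrative stand-in types that are not part of this diff:

```go
package main

import "fmt"

// Engine is a stand-in for consensus.Engine in this sketch.
type Engine interface{ Name() string }

// fakeEthash is a stand-in engine that additionally supports thread configuration.
type fakeEthash struct{ threads int }

func (e *fakeEthash) Name() string           { return "ethash" }
func (e *fakeEthash) SetThreads(threads int) { e.threads = threads }

func configure(e Engine, threads int) {
	// Probe for the optional capability; engines without it are left untouched.
	type threaded interface{ SetThreads(threads int) }
	if th, ok := e.(threaded); ok {
		th.SetThreads(threads)
	}
}

func main() {
	e := &fakeEthash{}
	configure(e, 4)
	fmt.Println(e.Name(), "threads:", e.threads) // ethash threads: 4
}
```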
diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go
index 073c36beb2..079f49310c 100644
--- a/cmd/geth/misccmd.go
+++ b/cmd/geth/misccmd.go
@@ -26,9 +26,9 @@ import (
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"gopkg.in/urfave/cli.v1"
)
@@ -87,7 +87,7 @@ func makedag(ctx *cli.Context) error {
utils.Fatalf("Can't find dir")
}
fmt.Println("making DAG, this could take awhile...")
- pow.MakeDataset(blockNum, dir)
+ ethash.MakeDataset(blockNum, dir)
}
default:
wrongArgs()
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 478e08834b..acdf5d5dcb 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/vm"
@@ -49,7 +50,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/p2p/netutil"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rpc"
whisper "github.com/ethereum/go-ethereum/whisper/whisperv2"
"gopkg.in/urfave/cli.v1"
@@ -149,7 +149,7 @@ var (
}
TestNetFlag = cli.BoolFlag{
Name: "testnet",
- Usage: "Ropsten network: pre-configured test network",
+ Usage: "Ropsten network: pre-configured proof-of-work test network",
}
DevModeFlag = cli.BoolFlag{
Name: "dev",
@@ -921,16 +921,16 @@ func MakeChain(ctx *cli.Context, stack *node.Node) (chain *core.BlockChain, chai
var err error
chainDb = MakeChainDatabase(ctx, stack)
- seal := pow.PoW(pow.FakePow{})
+ engine := ethash.NewFaker()
if !ctx.GlobalBool(FakePoWFlag.Name) {
- seal = pow.NewFullEthash("", 1, 0, "", 1, 0)
+ engine = ethash.New("", 1, 0, "", 1, 0)
}
config, _, err := core.SetupGenesisBlock(chainDb, MakeGenesis(ctx))
if err != nil {
Fatalf("%v", err)
}
vmcfg := vm.Config{EnablePreimageRecording: ctx.GlobalBool(VMEnableDebugFlag.Name)}
- chain, err = core.NewBlockChain(chainDb, config, seal, new(event.TypeMux), vmcfg)
+ chain, err = core.NewBlockChain(chainDb, config, engine, new(event.TypeMux), vmcfg)
if err != nil {
Fatalf("Can't create BlockChain: %v", err)
}
diff --git a/consensus/consensus.go b/consensus/consensus.go
new file mode 100644
index 0000000000..e318e57c01
--- /dev/null
+++ b/consensus/consensus.go
@@ -0,0 +1,94 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package consensus implements different Ethereum consensus engines.
+package consensus
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
+// ChainReader defines a small collection of methods needed to access the local
+// blockchain during header and/or uncle verification.
+type ChainReader interface {
+ // Config retrieves the blockchain's chain configuration.
+ Config() *params.ChainConfig
+
+ // CurrentHeader retrieves the current header from the local chain.
+ CurrentHeader() *types.Header
+
+ // GetHeader retrieves a block header from the database by hash and number.
+ GetHeader(hash common.Hash, number uint64) *types.Header
+
+ // GetHeaderByNumber retrieves a block header from the database by number.
+ GetHeaderByNumber(number uint64) *types.Header
+
+ // GetBlock retrieves a block from the database by hash and number.
+ GetBlock(hash common.Hash, number uint64) *types.Block
+}
+
+// Engine is an algorithm agnostic consensus engine.
+type Engine interface {
+ // VerifyHeader checks whether a header conforms to the consensus rules of a
+ // given engine. Verifying the seal may be done optionally here, or explicitly
+ // via the VerifySeal method.
+ VerifyHeader(chain ChainReader, header *types.Header, seal bool) error
+
+ // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+ // concurrently. The method returns a quit channel to abort the operations and
+ // a results channel to retrieve the async verifications (the order is that of
+ // the input slice).
+ VerifyHeaders(chain ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
+
+ // VerifyUncles verifies that the given block's uncles conform to the consensus
+ // rules of a given engine.
+ VerifyUncles(chain ChainReader, block *types.Block) error
+
+ // VerifySeal checks whether the crypto seal on a header is valid according to
+ // the consensus rules of the given engine.
+ VerifySeal(chain ChainReader, header *types.Header) error
+
+ // Prepare initializes the consensus fields of a block header according to the
+ // rules of a particular engine. The changes are executed inline.
+ Prepare(chain ChainReader, header *types.Header) error
+
+ // Finalize runs any post-transaction state modifications (e.g. block rewards)
+ // and assembles the final block.
+ //
+ // Note, the block header and state database might be updated to reflect any
+ // consensus rules that happen at finalization (e.g. block rewards).
+ Finalize(chain ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
+ uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
+
+ // Seal generates a new block for the given input block with the local miner's
+ // seal placed on top.
+ Seal(chain ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error)
+
+ // APIs returns the RPC APIs this consensus engine provides.
+ APIs(chain ChainReader) []rpc.API
+}
+
+// PoW is a consensus engine based on proof-of-work.
+type PoW interface {
+ Engine
+
+ // Hashrate returns the current mining hashrate of a PoW consensus engine.
+ Hashrate() float64
+}
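To make the `VerifyHeaders` contract concrete, here is a hedged sketch of how calling code might drain the two returned channels. `verifyBatch` is a hypothetical helper, and imports of `consensus` and `core/types` are assumed:

```go
// verifyBatch checks the seal on every header and stops at the first failure.
// Per the interface contract, results arrive in the same order as the input slice.
func verifyBatch(engine consensus.Engine, chain consensus.ChainReader, headers []*types.Header) error {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true // verify the seal on every header
	}
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort) // closing the quit channel cancels any outstanding work

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}
```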
diff --git a/pow/ethash_algo.go b/consensus/ethash/algorithm.go
similarity index 99%
rename from pow/ethash_algo.go
rename to consensus/ethash/algorithm.go
index 1e996785ff..7e8fbfc378 100644
--- a/pow/ethash_algo.go
+++ b/consensus/ethash/algorithm.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package pow
+package ethash
import (
"encoding/binary"
diff --git a/pow/ethash_algo_go1.7.go b/consensus/ethash/algorithm_go1.7.go
similarity index 99%
rename from pow/ethash_algo_go1.7.go
rename to consensus/ethash/algorithm_go1.7.go
index ce05b3bb08..c34d041c32 100644
--- a/pow/ethash_algo_go1.7.go
+++ b/consensus/ethash/algorithm_go1.7.go
@@ -16,7 +16,7 @@
// +build !go1.8
-package pow
+package ethash
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
diff --git a/pow/ethash_algo_go1.8.go b/consensus/ethash/algorithm_go1.8.go
similarity index 99%
rename from pow/ethash_algo_go1.8.go
rename to consensus/ethash/algorithm_go1.8.go
index cac96cd5e9..62bf4dec1b 100644
--- a/pow/ethash_algo_go1.8.go
+++ b/consensus/ethash/algorithm_go1.8.go
@@ -16,7 +16,7 @@
// +build go1.8
-package pow
+package ethash
import "math/big"
diff --git a/pow/ethash_algo_go1.8_test.go b/consensus/ethash/algorithm_go1.8_test.go
similarity index 99%
rename from pow/ethash_algo_go1.8_test.go
rename to consensus/ethash/algorithm_go1.8_test.go
index 57e0b0b7a2..fdc3023189 100644
--- a/pow/ethash_algo_go1.8_test.go
+++ b/consensus/ethash/algorithm_go1.8_test.go
@@ -16,7 +16,7 @@
// +build go1.8
-package pow
+package ethash
import "testing"
diff --git a/pow/ethash_algo_test.go b/consensus/ethash/algorithm_test.go
similarity index 99%
rename from pow/ethash_algo_test.go
rename to consensus/ethash/algorithm_test.go
index 0605d70ad7..7e4307a74a 100644
--- a/pow/ethash_algo_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package pow
+package ethash
import (
"bytes"
@@ -704,8 +704,8 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
go func(idx int) {
defer pend.Done()
- ethash := NewFullEthash(cachedir, 0, 1, "", 0, 0)
- if err := ethash.Verify(block); err != nil {
+ ethash := New(cachedir, 0, 1, "", 0, 0)
+ if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
}
}(i)
@@ -713,17 +713,6 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
pend.Wait()
}
-func TestTestMode(t *testing.T) {
- head := &types.Header{Difficulty: big.NewInt(100)}
- ethash := NewTestEthash()
- nonce, mix := ethash.Search(types.NewBlockWithHeader(head), nil)
- head.Nonce = types.EncodeNonce(nonce)
- copy(head.MixDigest[:], mix)
- if err := ethash.Verify(types.NewBlockWithHeader(head)); err != nil {
- t.Error("unexpected Verify error:", err)
- }
-}
-
// Benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
for i := 0; i < b.N; i++ {
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
new file mode 100644
index 0000000000..603be3e531
--- /dev/null
+++ b/consensus/ethash/consensus.go
@@ -0,0 +1,496 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/big"
+ "runtime"
+ "sync/atomic"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+ set "gopkg.in/fatih/set.v0"
+)
+
+// Ethash proof-of-work protocol constants.
+var (
+ blockReward *big.Int = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
+ maxUncles = 2 // Maximum number of uncles allowed in a single block
+)
+
+var (
+ ErrInvalidChain = errors.New("invalid header chain")
+ ErrParentUnknown = errors.New("parent not known locally")
+ ErrFutureBlock = errors.New("block in the future")
+ ErrLargeBlockTimestamp = errors.New("timestamp too big")
+ ErrZeroBlockTime = errors.New("timestamp equals parent's")
+ ErrInvalidNumber = errors.New("invalid block number")
+ ErrTooManyUncles = errors.New("too many uncles")
+ ErrDuplicateUncle = errors.New("duplicate uncle")
+ ErrUncleIsAncestor = errors.New("uncle is ancestor")
+ ErrDanglingUncle = errors.New("uncle's parent is not ancestor")
+ ErrNonceOutOfRange = errors.New("nonce out of range")
+ ErrInvalidDifficulty = errors.New("non-positive difficulty")
+ ErrInvalidMixDigest = errors.New("invalid mix digest")
+ ErrInvalidPoW = errors.New("invalid proof-of-work")
+)
+
+// VerifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum ethash engine.
+func (ethash *Ethash) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
+ // If we're running a full engine faking, accept any input as valid
+ if ethash.fakeFull {
+ return nil
+ }
+ // Short circuit if the header is known, or its parent is not
+ number := header.Number.Uint64()
+ if chain.GetHeader(header.Hash(), number) != nil {
+ return nil
+ }
+ parent := chain.GetHeader(header.ParentHash, number-1)
+ if parent == nil {
+ return ErrParentUnknown
+ }
+ // Sanity checks passed, do a proper verification
+ return ethash.verifyHeader(chain, header, parent, false, seal)
+}
+
+// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
+// concurrently. The method returns a quit channel to abort the operations and
+// a results channel to retrieve the async verifications.
+func (ethash *Ethash) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
+ // If we're running a full engine faking, accept any input as valid
+ if ethash.fakeFull {
+ abort, results := make(chan struct{}), make(chan error, len(headers))
+ for i := 0; i < len(headers); i++ {
+ results <- nil
+ }
+ return abort, results
+ }
+ // Spawn as many workers as allowed threads
+ workers := runtime.GOMAXPROCS(0)
+ if len(headers) < workers {
+ workers = len(headers)
+ }
+ // Create a task channel and spawn the verifiers
+ type result struct {
+ index int
+ err error
+ }
+ inputs := make(chan int, workers)
+ outputs := make(chan result, len(headers))
+
+ var badblock uint64
+ for i := 0; i < workers; i++ {
+ go func() {
+ for index := range inputs {
+ // If we've found a bad block already before this, stop validating
+ if bad := atomic.LoadUint64(&badblock); bad != 0 && bad <= headers[index].Number.Uint64() {
+ outputs <- result{index: index, err: ErrInvalidChain}
+ continue
+ }
+ // We need to look up the first parent
+ var parent *types.Header
+ if index == 0 {
+ parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
+ } else if headers[index-1].Hash() == headers[index].ParentHash {
+ parent = headers[index-1]
+ }
+ // Ensure the validation is useful and execute it
+ var failure error
+ switch {
+ case chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()-1) != nil:
+ outputs <- result{index: index, err: nil}
+ case parent == nil:
+ failure = ErrParentUnknown
+ outputs <- result{index: index, err: failure}
+ default:
+ failure = ethash.verifyHeader(chain, headers[index], parent, false, seals[index])
+ outputs <- result{index: index, err: failure}
+ }
+ // If a validation failure occurred, mark subsequent blocks invalid
+ if failure != nil {
+ number := headers[index].Number.Uint64()
+ if prev := atomic.LoadUint64(&badblock); prev == 0 || prev > number {
+ // This two-step atomic op isn't thread-safe in that `badblock` might end
+ // up slightly higher than the block number of the first failure (if many
+ // workers try to write at the same time), but that's fine: we're mostly
+ // interested in avoiding large amounts of useless work, and don't care
+ // about 1-2 extra runs. Doing "full thread safety" would require mutexes,
+ // adding noticeable sync overhead to the fast-spinning worker routines.
+ atomic.StoreUint64(&badblock, number)
+ }
+ }
+ }
+ }()
+ }
+ // Feed item indices to the workers until done, sorting and feeding the results to the caller
+ dones := make([]bool, len(headers))
+ errors := make([]error, len(headers))
+
+ abort := make(chan struct{})
+ returns := make(chan error, len(headers))
+
+ go func() {
+ defer close(inputs)
+
+ input, output := 0, 0
+ for i := 0; i < len(headers)*2; i++ {
+ var res result
+
+ // If there are tasks left, push to workers
+ if input < len(headers) {
+ select {
+ case inputs <- input:
+ input++
+ continue
+ case <-abort:
+ return
+ case res = <-outputs:
+ }
+ } else {
+ // Otherwise keep waiting for results
+ select {
+ case <-abort:
+ return
+ case res = <-outputs:
+ }
+ }
+ // A result arrived, save and propagate if next
+ dones[res.index], errors[res.index] = true, res.err
+ for output < len(headers) && dones[output] {
+ returns <- errors[output]
+ output++
+ }
+ }
+ }()
+ return abort, returns
+}
+
+// VerifyUncles verifies that the given block's uncles conform to the consensus
+// rules of the stock Ethereum ethash engine.
+func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
+ // If we're running a full engine faking, accept any input as valid
+ if ethash.fakeFull {
+ return nil
+ }
+ // Verify that there are at most 2 uncles included in this block
+ if len(block.Uncles()) > maxUncles {
+ return ErrTooManyUncles
+ }
+ // Gather the set of past uncles and ancestors
+ uncles, ancestors := set.New(), make(map[common.Hash]*types.Header)
+
+ number, parent := block.NumberU64()-1, block.ParentHash()
+ for i := 0; i < 7; i++ {
+ ancestor := chain.GetBlock(parent, number)
+ if ancestor == nil {
+ break
+ }
+ ancestors[ancestor.Hash()] = ancestor.Header()
+ for _, uncle := range ancestor.Uncles() {
+ uncles.Add(uncle.Hash())
+ }
+ parent, number = ancestor.ParentHash(), number-1
+ }
+ ancestors[block.Hash()] = block.Header()
+ uncles.Add(block.Hash())
+
+ // Verify that each of the uncles is recent, but not an ancestor
+ for _, uncle := range block.Uncles() {
+ // Make sure every uncle is rewarded only once
+ hash := uncle.Hash()
+ if uncles.Has(hash) {
+ return ErrDuplicateUncle
+ }
+ uncles.Add(hash)
+
+ // Make sure the uncle has a valid ancestry
+ if ancestors[hash] != nil {
+ return ErrUncleIsAncestor
+ }
+ if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
+ return ErrDanglingUncle
+ }
+ if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// verifyHeader checks whether a header conforms to the consensus rules of the
+// stock Ethereum ethash engine.
+//
+// See YP section 4.3.4. "Block Header Validity"
+func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *types.Header, uncle bool, seal bool) error {
+ // Ensure that the header's extra-data section is of a reasonable size
+ if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
+ return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
+ }
+ // Verify the header's timestamp
+ if uncle {
+ if header.Time.Cmp(math.MaxBig256) > 0 {
+ return ErrLargeBlockTimestamp
+ }
+ } else {
+ if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
+ return ErrFutureBlock
+ }
+ }
+ if header.Time.Cmp(parent.Time) <= 0 {
+ return ErrZeroBlockTime
+ }
+ // Verify the block's difficulty based on its timestamp and parent's difficulty
+ expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
+ if expected.Cmp(header.Difficulty) != 0 {
+ return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
+ }
+ // Verify that the gas limit remains within allowed bounds
+ diff := new(big.Int).Set(parent.GasLimit)
+ diff = diff.Sub(diff, header.GasLimit)
+ diff.Abs(diff)
+
+ limit := new(big.Int).Set(parent.GasLimit)
+ limit = limit.Div(limit, params.GasLimitBoundDivisor)
+
+ if diff.Cmp(limit) >= 0 || header.GasLimit.Cmp(params.MinGasLimit) < 0 {
+ return fmt.Errorf("invalid gas limit: have %v, want %v += %v", header.GasLimit, parent.GasLimit, limit)
+ }
+ // Verify that the block number is parent's +1
+ if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
+ return ErrInvalidNumber
+ }
+ // Verify the engine specific seal securing the block
+ if seal {
+ if err := ethash.VerifySeal(chain, header); err != nil {
+ return err
+ }
+ }
+ // If all checks passed, validate any special fields for hard forks
+ if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
+ return err
+ }
+ if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
+ return err
+ }
+ return nil
+}
+
+// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
+// that a new block should have when created at the given time, based on the parent
+// block's time and difficulty.
+//
+// TODO (karalabe): Move the chain maker into this package and make this private!
+func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+ if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
+ return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
+ }
+ return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
+}
+
+// Some weird constants to avoid constant memory allocs for them.
+var (
+ expDiffPeriod = big.NewInt(100000)
+ big10 = big.NewInt(10)
+ bigMinus99 = big.NewInt(-99)
+)
+
+// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns the
+// difficulty that a new block should have when created at the given time, based
+// on the parent block's time and difficulty. The calculation uses the Homestead rules.
+func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+ // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
+ // algorithm:
+ // diff = (parent_diff +
+ // (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
+ // ) + 2^(periodCount - 2)
+
+ bigTime := new(big.Int).SetUint64(time)
+ bigParentTime := new(big.Int).SetUint64(parentTime)
+
+ // holds intermediate values to make the algo easier to read & audit
+ x := new(big.Int)
+ y := new(big.Int)
+
+ // 1 - (block_timestamp - parent_timestamp) // 10
+ x.Sub(bigTime, bigParentTime)
+ x.Div(x, big10)
+ x.Sub(common.Big1, x)
+
+ // max(1 - (block_timestamp - parent_timestamp) // 10, -99)
+ if x.Cmp(bigMinus99) < 0 {
+ x.Set(bigMinus99)
+ }
+ // (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
+ y.Div(parentDiff, params.DifficultyBoundDivisor)
+ x.Mul(y, x)
+ x.Add(parentDiff, x)
+
+ // minimum difficulty can ever be (before exponential factor)
+ if x.Cmp(params.MinimumDifficulty) < 0 {
+ x.Set(params.MinimumDifficulty)
+ }
+ // for the exponential factor
+ periodCount := new(big.Int).Add(parentNumber, common.Big1)
+ periodCount.Div(periodCount, expDiffPeriod)
+
+ // the exponential factor, commonly referred to as "the bomb"
+ // diff = diff + 2^(periodCount - 2)
+ if periodCount.Cmp(common.Big1) > 0 {
+ y.Sub(periodCount, common.Big2)
+ y.Exp(common.Big2, y, nil)
+ x.Add(x, y)
+ }
+ return x
+}
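A worked example of the Homestead formula via the exported `CalcDifficulty` wrapper. The chain config is illustrative (Homestead active from genesis); with a 5-second block time the adjustment factor is +1, so the child difficulty grows by `parent_diff / 2048`, and the difficulty bomb is still zero this early in the chain:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Homestead rules from block 0 (illustrative config, not a live network's).
	config := &params.ChainConfig{HomesteadBlock: big.NewInt(0)}

	parentTime := uint64(1000)
	blockTime := uint64(1005)        // 5s block time: (1005-1000)//10 == 0, so factor is +1
	parentNumber := big.NewInt(1000) // well below the first bomb period
	parentDiff := big.NewInt(131072) // params.MinimumDifficulty

	// parent_diff/2048 = 64, so the child difficulty is 131072 + 64 = 131136.
	diff := ethash.CalcDifficulty(config, blockTime, parentTime, parentNumber, parentDiff)
	fmt.Println(diff) // 131136
}
```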
+
+// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
+// difficulty that a new block should have when created at the given time, based on
+// the parent block's time and difficulty. The calculation uses the Frontier rules.
+func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
+ diff := new(big.Int)
+ adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
+ bigTime := new(big.Int)
+ bigParentTime := new(big.Int)
+
+ bigTime.SetUint64(time)
+ bigParentTime.SetUint64(parentTime)
+
+ if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
+ diff.Add(parentDiff, adjust)
+ } else {
+ diff.Sub(parentDiff, adjust)
+ }
+ if diff.Cmp(params.MinimumDifficulty) < 0 {
+ diff.Set(params.MinimumDifficulty)
+ }
+
+ periodCount := new(big.Int).Add(parentNumber, common.Big1)
+ periodCount.Div(periodCount, expDiffPeriod)
+ if periodCount.Cmp(common.Big1) > 0 {
+ // diff = diff + 2^(periodCount - 2)
+ expDiff := periodCount.Sub(periodCount, common.Big2)
+ expDiff.Exp(common.Big2, expDiff, nil)
+ diff.Add(diff, expDiff)
+ diff = math.BigMax(diff, params.MinimumDifficulty)
+ }
+ return diff
+}
+
+// VerifySeal implements consensus.Engine, checking whether the given block satisfies
+// the PoW difficulty requirements.
+func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
+ // If we're running a fake PoW, accept any seal as valid
+ if ethash.fakeMode {
+ time.Sleep(ethash.fakeDelay)
+ if ethash.fakeFail == header.Number.Uint64() {
+ return ErrInvalidPoW
+ }
+ return nil
+ }
+ // If we're running a shared PoW, delegate verification to it
+ if ethash.shared != nil {
+ return ethash.shared.VerifySeal(chain, header)
+ }
+ // Sanity check that the block number is below the lookup table size (60M blocks)
+ number := header.Number.Uint64()
+ if number/epochLength >= uint64(len(cacheSizes)) {
+ // Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
+ return ErrNonceOutOfRange
+ }
+ // Ensure that we have a valid difficulty for the block
+ if header.Difficulty.Sign() <= 0 {
+ return ErrInvalidDifficulty
+ }
+ // Recompute the digest and PoW value and verify against the header
+ cache := ethash.cache(number)
+
+ size := datasetSize(number)
+ if ethash.tester {
+ size = 32 * 1024
+ }
+ digest, result := hashimotoLight(size, cache, header.HashNoNonce().Bytes(), header.Nonce.Uint64())
+ if !bytes.Equal(header.MixDigest[:], digest) {
+ return ErrInvalidMixDigest
+ }
+ target := new(big.Int).Div(maxUint256, header.Difficulty)
+ if new(big.Int).SetBytes(result).Cmp(target) > 0 {
+ return ErrInvalidPoW
+ }
+ return nil
+}
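The seal check ultimately reduces to one comparison: the hashimoto result, read as a 256-bit integer, must not exceed 2^256 / difficulty. A hedged, self-contained sketch of just that arithmetic (the hashimoto output is faked here as a plain byte slice):

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// validSeal mirrors the final comparison in VerifySeal: the 32-byte hashimoto
// result, interpreted big-endian, must not exceed 2^256 / difficulty.
func validSeal(result []byte, difficulty *big.Int) bool {
	maxUint256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target := new(big.Int).Div(maxUint256, difficulty)
	return new(big.Int).SetBytes(result).Cmp(target) <= 0
}

func main() {
	// With difficulty 2 the target is 2^255: all-zero bytes pass, all-0xff bytes do not.
	easy := make([]byte, 32)
	hard := bytes.Repeat([]byte{0xff}, 32)
	fmt.Println(validSeal(easy, big.NewInt(2)), validSeal(hard, big.NewInt(2))) // true false
}
```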
+
+// Prepare implements consensus.Engine, initializing the difficulty field of a
+// header to conform to the ethash protocol. The changes are done inline.
+func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header) error {
+ parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
+ if parent == nil {
+ return ErrParentUnknown
+ }
+ header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(),
+ parent.Time.Uint64(), parent.Number, parent.Difficulty)
+
+ return nil
+}
+
+// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
+// setting the final state and assembling the block.
+func (ethash *Ethash) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
+ // Accumulate any block and uncle rewards and commit the final state root
+ AccumulateRewards(state, header, uncles)
+ header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
+
+ // Header seems complete, assemble into a block and return
+ return types.NewBlock(header, txs, uncles, receipts), nil
+}
+
+// Some weird constants to avoid constant memory allocs for them.
+var (
+ big8 = big.NewInt(8)
+ big32 = big.NewInt(32)
+)
+
+// AccumulateRewards credits the coinbase of the given block with the mining
+// reward. The total reward consists of the static block reward and rewards for
+// included uncles. The coinbase of each uncle block is also rewarded.
+//
+// TODO (karalabe): Move the chain maker into this package and make this private!
+func AccumulateRewards(state *state.StateDB, header *types.Header, uncles []*types.Header) {
+ reward := new(big.Int).Set(blockReward)
+ r := new(big.Int)
+ for _, uncle := range uncles {
+ r.Add(uncle.Number, big8)
+ r.Sub(r, header.Number)
+ r.Mul(r, blockReward)
+ r.Div(r, big8)
+ state.AddBalance(uncle.Coinbase, r)
+
+ r.Div(blockReward, big32)
+ reward.Add(reward, r)
+ }
+ state.AddBalance(header.Coinbase, reward)
+}
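As a sanity check on the reward arithmetic above: an uncle one generation behind the including header pays its coinbase 7/8 of the 5 ether block reward (4.375 ether), and the miner receives an extra 1/32 of the block reward (0.15625 ether) per included uncle on top of the static 5 ether. A minimal sketch reproducing those numbers with illustrative block heights:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	blockReward := big.NewInt(5e18) // 5 ether in wei
	headerNumber := big.NewInt(100) // including block
	uncleNumber := big.NewInt(99)   // uncle one generation back

	// Uncle reward: (uncle.Number + 8 - header.Number) * blockReward / 8
	r := new(big.Int).Add(uncleNumber, big.NewInt(8))
	r.Sub(r, headerNumber)
	r.Mul(r, blockReward)
	r.Div(r, big.NewInt(8))
	fmt.Println(r) // 4375000000000000000 (4.375 ether)

	// Nephew bonus on top of the static reward: blockReward / 32
	bonus := new(big.Int).Div(blockReward, big.NewInt(32))
	fmt.Println(new(big.Int).Add(blockReward, bonus)) // 5156250000000000000 (5.15625 ether)
}
```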
diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go
new file mode 100644
index 0000000000..683c10be49
--- /dev/null
+++ b/consensus/ethash/consensus_test.go
@@ -0,0 +1,79 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+ "encoding/json"
+ "math/big"
+ "os"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+type diffTest struct {
+ ParentTimestamp uint64
+ ParentDifficulty *big.Int
+ CurrentTimestamp uint64
+ CurrentBlocknumber *big.Int
+ CurrentDifficulty *big.Int
+}
+
+func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
+ var ext struct {
+ ParentTimestamp string
+ ParentDifficulty string
+ CurrentTimestamp string
+ CurrentBlocknumber string
+ CurrentDifficulty string
+ }
+ if err := json.Unmarshal(b, &ext); err != nil {
+ return err
+ }
+
+ d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
+ d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
+ d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
+ d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
+ d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
+
+ return nil
+}
+
+func TestCalcDifficulty(t *testing.T) {
+ file, err := os.Open("../../tests/files/BasicTests/difficulty.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer file.Close()
+
+ tests := make(map[string]diffTest)
+ err = json.NewDecoder(file).Decode(&tests)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
+ for name, test := range tests {
+ number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
+ diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
+ if diff.Cmp(test.CurrentDifficulty) != 0 {
+ t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
+ }
+ }
+}
diff --git a/pow/ethash.go b/consensus/ethash/ethash.go
similarity index 80%
rename from pow/ethash.go
rename to consensus/ethash/ethash.go
index 9adc38540d..aa5b2d8a02 100644
--- a/pow/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -14,10 +14,10 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package pow
+// Package ethash implements the ethash proof-of-work consensus engine.
+package ethash
import (
- "bytes"
"errors"
"fmt"
"math"
@@ -32,24 +32,20 @@ import (
"unsafe"
mmap "github.com/edsrzf/mmap-go"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
metrics "github.com/rcrowley/go-metrics"
)
-var (
- ErrInvalidDumpMagic = errors.New("invalid dump magic")
- ErrNonceOutOfRange = errors.New("nonce out of range")
- ErrInvalidDifficulty = errors.New("non-positive difficulty")
- ErrInvalidMixDigest = errors.New("invalid mix digest")
- ErrInvalidPoW = errors.New("pow difficulty invalid")
-)
+var ErrInvalidDumpMagic = errors.New("invalid dump magic")
var (
// maxUint256 is a big integer representing 2^256-1
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
- sharedEthash = NewFullEthash("", 3, 0, "", 1, 0)
+ sharedEthash = New("", 3, 0, "", 1, 0)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -321,7 +317,8 @@ func MakeDataset(block uint64, dir string) {
d.release()
}
-// Ethash is a PoW data struture implementing the ethash algorithm.
+// Ethash is a consensus engine based on proof-of-work implementing the ethash
+// algorithm.
type Ethash struct {
cachedir string // Data directory to store the verification caches
cachesinmem int // Number of caches to keep in memory
@@ -334,15 +331,26 @@ type Ethash struct {
fcache *cache // Pre-generated cache for the estimated future epoch
datasets map[uint64]*dataset // In memory datasets to avoid regenerating too often
fdataset *dataset // Pre-generated dataset for the estimated future epoch
- lock sync.Mutex // Ensures thread safety for the in-memory caches
+ // Mining related fields
+ rand *rand.Rand // Properly seeded random source for nonces
+ threads int // Number of threads to mine on if mining
+ update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate
- tester bool // Flag whether to use a smaller test dataset
+ // The fields below are hooks for testing
+ tester bool // Flag whether to use a smaller test dataset
+ shared *Ethash // Shared PoW verifier to avoid cache regeneration
+ fakeMode bool // Flag whether to disable PoW checking
+ fakeFull bool // Flag whether to disable all consensus rules
+ fakeFail uint64 // Block number which fails PoW check even in fake mode
+ fakeDelay time.Duration // Time delay to sleep for before returning from verify
+
+ lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
}
-// NewFullEthash creates a full sized ethash PoW scheme.
-func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) PoW {
+// New creates a full sized ethash PoW scheme.
+func New(cachedir string, cachesinmem, cachesondisk int, dagdir string, dagsinmem, dagsondisk int) *Ethash {
if cachesinmem <= 0 {
log.Warn("One ethash cache must alwast be in memory", "requested", cachesinmem)
cachesinmem = 1
@@ -362,58 +370,55 @@ func NewFullEthash(cachedir string, cachesinmem, cachesondisk int, dagdir string
dagsondisk: dagsondisk,
caches: make(map[uint64]*cache),
datasets: make(map[uint64]*dataset),
+ update: make(chan struct{}),
hashrate: metrics.NewMeter(),
}
}
-// NewTestEthash creates a small sized ethash PoW scheme useful only for testing
+// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
-func NewTestEthash() PoW {
+func NewTester() *Ethash {
return &Ethash{
cachesinmem: 1,
caches: make(map[uint64]*cache),
datasets: make(map[uint64]*dataset),
tester: true,
+ update: make(chan struct{}),
hashrate: metrics.NewMeter(),
}
}
-// NewSharedEthash creates a full sized ethash PoW shared between all requesters
-// running in the same process.
-func NewSharedEthash() PoW {
- return sharedEthash
+// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
+// all blocks' seal as valid, though they still have to conform to the Ethereum
+// consensus rules.
+func NewFaker() *Ethash {
+ return &Ethash{fakeMode: true}
}
-// Verify implements PoW, checking whether the given block satisfies the PoW
-// difficulty requirements.
-func (ethash *Ethash) Verify(block Block) error {
- // Sanity check that the block number is below the lookup table size (60M blocks)
- number := block.NumberU64()
- if number/epochLength >= uint64(len(cacheSizes)) {
- // Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
- return ErrNonceOutOfRange
- }
- // Ensure that we have a valid difficulty for the block
- difficulty := block.Difficulty()
- if difficulty.Sign() <= 0 {
- return ErrInvalidDifficulty
- }
- // Recompute the digest and PoW value and verify against the block
- cache := ethash.cache(number)
+// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
+// accepts all blocks as valid apart from the single one specified, though they
+// still have to conform to the Ethereum consensus rules.
+func NewFakeFailer(fail uint64) *Ethash {
+ return &Ethash{fakeMode: true, fakeFail: fail}
+}
- size := datasetSize(number)
- if ethash.tester {
- size = 32 * 1024
- }
- digest, result := hashimotoLight(size, cache, block.HashNoNonce().Bytes(), block.Nonce())
- if !bytes.Equal(block.MixDigest().Bytes(), digest) {
- return ErrInvalidMixDigest
- }
- target := new(big.Int).Div(maxUint256, difficulty)
- if new(big.Int).SetBytes(result).Cmp(target) > 0 {
- return ErrInvalidPoW
- }
- return nil
+// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
+// accepts all blocks as valid, but delays verifications by some time, though
+// they still have to conform to the Ethereum consensus rules.
+func NewFakeDelayer(delay time.Duration) *Ethash {
+ return &Ethash{fakeMode: true, fakeDelay: delay}
+}
+
+// NewFullFaker creates an ethash consensus engine with a full fake scheme that
+// accepts all blocks as valid, without checking any consensus rules whatsoever.
+func NewFullFaker() *Ethash {
+ return &Ethash{fakeMode: true, fakeFull: true}
+}
+
+// NewShared creates a full sized ethash PoW shared between all requesters running
+// in the same process.
+func NewShared() *Ethash {
+ return &Ethash{shared: sharedEthash}
}
// cache tries to retrieve a verification cache for the specified block number
@@ -477,43 +482,6 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
return current.cache
}
-// Search implements PoW, attempting to find a nonce that satisfies the block's
-// difficulty requirements.
-func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
- var (
- hash = block.HashNoNonce().Bytes()
- diff = block.Difficulty()
- target = new(big.Int).Div(maxUint256, diff)
- dataset = ethash.dataset(block.NumberU64())
- rand = rand.New(rand.NewSource(time.Now().UnixNano()))
- nonce = uint64(rand.Int63())
- attempts int64
- )
- // Start generating random nonces until we abort or find a good one
- for {
- select {
- case <-stop:
- // Mining terminated, update stats and abort
- ethash.hashrate.Mark(attempts)
- return 0, nil
-
- default:
- // We don't have to update hash rate on every nonce, so update after after 2^X nonces
- attempts++
- if (attempts % (1 << 15)) == 0 {
- ethash.hashrate.Mark(attempts)
- attempts = 0
- }
- // Compute the PoW value of this nonce
- digest, result := hashimotoFull(dataset, hash, nonce)
- if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
- return nonce, digest
- }
- nonce++
- }
- }
-}
-
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
@@ -576,14 +544,44 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
return current.dataset
}
+// Threads returns the number of mining threads currently enabled. This doesn't
+// necessarily mean that mining is running!
+func (ethash *Ethash) Threads() int {
+ ethash.lock.Lock()
+ defer ethash.lock.Unlock()
+
+ return ethash.threads
+}
+
+// SetThreads updates the number of mining threads currently enabled. Calling
+// this method does not start mining, only sets the thread count. If zero is
+// specified, the miner will use all cores of the machine.
+func (ethash *Ethash) SetThreads(threads int) {
+ ethash.lock.Lock()
+ defer ethash.lock.Unlock()
+
+ // Update the threads and ping any running seal to pull in any changes
+ ethash.threads = threads
+ select {
+ case ethash.update <- struct{}{}:
+ default:
+ }
+}
+
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
func (ethash *Ethash) Hashrate() float64 {
return ethash.hashrate.Rate1()
}
-// EthashSeedHash is the seed to use for generating a vrification cache and the
-// mining dataset.
-func EthashSeedHash(block uint64) []byte {
+// APIs implements consensus.Engine, returning the user facing RPC APIs. Currently
+// that is empty.
+func (ethash *Ethash) APIs(chain consensus.ChainReader) []rpc.API {
+ return nil
+}
+
+// SeedHash is the seed to use for generating a verification cache and the mining
+// dataset.
+func SeedHash(block uint64) []byte {
return seedHash(block)
}
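A hedged sketch of wiring the new mining knobs together: construct a full engine, pin the thread count, and read the hashrate meter. The cache/DAG directories and counts below are placeholders, not recommendations:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Illustrative construction; directories and cache/DAG counts are placeholders.
	engine := ethash.New("/tmp/ethash-caches", 3, 1, "/tmp/ethash-dags", 1, 1)

	engine.SetThreads(4)          // mine on four threads; 0 means "all CPU cores" at seal time
	fmt.Println(engine.Threads()) // 4

	// Hashrate is the 1-minute average of the search meter (zero until mining starts).
	fmt.Println(engine.Hashrate())
}
```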
diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go
new file mode 100644
index 0000000000..b3a2f32f70
--- /dev/null
+++ b/consensus/ethash/ethash_test.go
@@ -0,0 +1,40 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+ "math/big"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+// Tests that ethash works correctly in test mode.
+func TestTestMode(t *testing.T) {
+ head := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
+
+ ethash := NewTester()
+ block, err := ethash.Seal(nil, types.NewBlockWithHeader(head), nil)
+ if err != nil {
+ t.Fatalf("failed to seal block: %v", err)
+ }
+ head.Nonce = types.EncodeNonce(block.Nonce())
+ head.MixDigest = block.MixDigest()
+ if err := ethash.VerifySeal(nil, head); err != nil {
+ t.Fatalf("unexpected verification error: %v", err)
+ }
+}
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
new file mode 100644
index 0000000000..9a000ed316
--- /dev/null
+++ b/consensus/ethash/sealer.go
@@ -0,0 +1,146 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package ethash
+
+import (
+ crand "crypto/rand"
+ "math"
+ "math/big"
+ "math/rand"
+ "runtime"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Seal implements consensus.Engine, attempting to find a nonce that satisfies
+// the block's difficulty requirements.
+func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, stop <-chan struct{}) (*types.Block, error) {
+ // If we're running a fake PoW, simply return a 0 nonce immediately
+ if ethash.fakeMode {
+ header := block.Header()
+ header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
+ return block.WithSeal(header), nil
+ }
+ // If we're running a shared PoW, delegate sealing to it
+ if ethash.shared != nil {
+ return ethash.shared.Seal(chain, block, stop)
+ }
+ // Create a runner and the multiple search threads it directs
+ abort := make(chan struct{})
+ found := make(chan *types.Block)
+
+ ethash.lock.Lock()
+ threads := ethash.threads
+ if ethash.rand == nil {
+ seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
+ if err != nil {
+ ethash.lock.Unlock()
+ return nil, err
+ }
+ ethash.rand = rand.New(rand.NewSource(seed.Int64()))
+ }
+ ethash.lock.Unlock()
+ if threads == 0 {
+ threads = runtime.NumCPU()
+ }
+ var pend sync.WaitGroup
+ for i := 0; i < threads; i++ {
+ pend.Add(1)
+ go func(id int, nonce uint64) {
+ defer pend.Done()
+ ethash.mine(block, id, nonce, abort, found)
+ }(i, uint64(ethash.rand.Int63()))
+ }
+ // Wait until sealing is terminated or a nonce is found
+ var result *types.Block
+ select {
+ case <-stop:
+ // Outside abort, stop all miner threads
+ close(abort)
+ case result = <-found:
+ // One of the threads found a block, abort all others
+ close(abort)
+ case <-ethash.update:
+ // Thread count was changed on user request, restart
+ close(abort)
+ pend.Wait()
+ return ethash.Seal(chain, block, stop)
+ }
+ // Wait for all miners to terminate and return the block
+ pend.Wait()
+ return result, nil
+}
+
+// mine is the actual proof-of-work miner that searches for a nonce starting from
+// seed that results in correct final block difficulty.
+func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
+ // Extract some data from the header
+ var (
+ header = block.Header()
+ hash = header.HashNoNonce().Bytes()
+ target = new(big.Int).Div(maxUint256, header.Difficulty)
+
+ number = header.Number.Uint64()
+ dataset = ethash.dataset(number)
+ )
+ // Start generating random nonces until we abort or find a good one
+ var (
+ attempts = int64(0)
+ nonce = seed
+ )
+ logger := log.New("miner", id)
+ logger.Trace("Started ethash search for new nonces", "seed", seed)
+ for {
+ select {
+ case <-abort:
+ // Mining terminated, update stats and abort
+ logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
+ ethash.hashrate.Mark(attempts)
+ return
+
+ default:
+ // We don't have to update hash rate on every nonce, so update after 2^X nonces
+ attempts++
+ if (attempts % (1 << 15)) == 0 {
+ ethash.hashrate.Mark(attempts)
+ attempts = 0
+ }
+ // Compute the PoW value of this nonce
+ digest, result := hashimotoFull(dataset, hash, nonce)
+ if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
+ // Correct nonce found, create a new header with it
+ header = types.CopyHeader(header)
+ header.Nonce = types.EncodeNonce(nonce)
+ header.MixDigest = common.BytesToHash(digest)
+
+ // Seal and return a block (if still needed)
+ select {
+ case found <- block.WithSeal(header):
+ logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
+ case <-abort:
+ logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
+ }
+ return
+ }
+ nonce++
+ }
+ }
+}
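A hedged sketch of driving `Seal` from calling code, using the test-sized engine so the search terminates quickly. The stop channel shows how a caller would cancel an in-flight seal; closing it makes `Seal` return a nil block:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	engine := ethash.NewTester() // small test dataset, so sealing finishes quickly

	header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
	block := types.NewBlockWithHeader(header)

	stop := make(chan struct{}) // close(stop) would abort the search and return a nil block
	sealed, err := engine.Seal(nil, block, stop)
	if err != nil {
		panic(err)
	}
	fmt.Printf("nonce=%d mix=%x\n", sealed.Nonce(), sealed.MixDigest())
}
```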
diff --git a/pow/xor.go b/consensus/ethash/xor.go
similarity index 99%
rename from pow/xor.go
rename to consensus/ethash/xor.go
index 558aeb4690..90e2327466 100644
--- a/pow/xor.go
+++ b/consensus/ethash/xor.go
@@ -4,7 +4,7 @@
// Source: https://golang.org/src/crypto/cipher/xor.go
-package pow
+package ethash
import (
"runtime"
diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go
new file mode 100644
index 0000000000..67d4adb445
--- /dev/null
+++ b/consensus/misc/dao.go
@@ -0,0 +1,85 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+ "bytes"
+ "errors"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+var (
+ // ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a
+ // pro-fork client.
+ ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data")
+
+ // ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no-
+ // fork client.
+ ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data")
+)
+
+// VerifyDAOHeaderExtraData validates the extra-data field of a block header to
+// ensure it conforms to DAO hard-fork rules.
+//
+// DAO hard-fork extension to the header validity:
+// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range
+// with the fork specific extra-data set
+// b) if the node is pro-fork, require blocks in the specific range to have the
+// unique extra-data set.
+func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
+ // Short circuit validation if the node doesn't care about the DAO fork
+ if config.DAOForkBlock == nil {
+ return nil
+ }
+ // Make sure the block is within the fork's modified extra-data range
+ limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange)
+ if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
+ return nil
+ }
+ // Depending whether we support or oppose the fork, validate the extra-data contents
+ if config.DAOForkSupport {
+ if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
+ return ErrBadProDAOExtra
+ }
+ } else {
+ if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
+ return ErrBadNoDAOExtra
+ }
+ }
+ // All ok, header has the same extra-data we expect
+ return nil
+}
+
+// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
+// rules, transferring all balances of a set of DAO accounts to a single refund
+// contract.
+func ApplyDAOHardFork(statedb *state.StateDB) {
+ // Retrieve the contract to refund balances into
+ if !statedb.Exist(params.DAORefundContract) {
+ statedb.CreateAccount(params.DAORefundContract)
+ }
+
+ // Move every DAO account and extra-balance account funds into the refund contract
+ for _, addr := range params.DAODrainList() {
+ statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
+ statedb.SetBalance(addr, new(big.Int))
+ }
+}
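The extra-data rule only applies inside the ten-block window [DAOForkBlock, DAOForkBlock+10). A hedged sketch checking a header against a pro-fork configuration; the fork height is the mainnet value, used here purely for illustration:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Pro-fork configuration with the mainnet DAO fork height.
	config := &params.ChainConfig{
		DAOForkBlock:   big.NewInt(1920000),
		DAOForkSupport: true,
	}

	// A header inside the fork window must carry the canonical pro-fork extra-data.
	header := &types.Header{
		Number: big.NewInt(1920001),
		Extra:  params.DAOForkBlockExtra,
	}
	fmt.Println(misc.VerifyDAOHeaderExtraData(config, header)) // <nil>

	// Dropping the extra-data inside the window fails on a pro-fork client.
	header.Extra = nil
	fmt.Println(misc.VerifyDAOHeaderExtraData(config, header)) // bad DAO pro-fork extra-data
}
```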
diff --git a/consensus/misc/forks.go b/consensus/misc/forks.go
new file mode 100644
index 0000000000..4a5e7c37e0
--- /dev/null
+++ b/consensus/misc/forks.go
@@ -0,0 +1,43 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/params"
+)
+
+// VerifyForkHashes verifies that blocks conforming to network hard-forks do have
+// the correct hashes, to avoid clients going off on different chains. This is an
+// optional feature.
+func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bool) error {
+ // We don't care about uncles
+ if uncle {
+ return nil
+ }
+ // If the homestead reprice hash is set, validate it
+ if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
+ if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
+ return fmt.Errorf("homestead gas reprice fork: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash)
+ }
+ }
+ // All ok, return
+ return nil
+}
diff --git a/core/bench_test.go b/core/bench_test.go
index a154ccbb12..20676fc976 100644
--- a/core/bench_test.go
+++ b/core/bench_test.go
@@ -25,13 +25,13 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func BenchmarkInsertChain_empty_memdb(b *testing.B) {
@@ -176,7 +176,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
// Time the insertion of the new chain.
// State and blocks are stored in the same DB.
evmux := new(event.TypeMux)
- chainman, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ chainman, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
defer chainman.Stop()
b.ReportAllocs()
b.ResetTimer()
@@ -286,7 +286,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) {
if err != nil {
b.Fatalf("error opening database at %v: %v", dir, err)
}
- chain, err := NewBlockChain(db, params.TestChainConfig, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ chain, err := NewBlockChain(db, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if err != nil {
b.Fatalf("error creating chain: %v", err)
}
diff --git a/core/block_validator.go b/core/block_validator.go
index f93a9f40ba..00457dd7ab 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -19,22 +19,12 @@ package core
import (
"fmt"
"math/big"
- "time"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
- "gopkg.in/fatih/set.v0"
-)
-
-var (
- ExpDiffPeriod = big.NewInt(100000)
- big10 = big.NewInt(10)
- bigMinus99 = big.NewInt(-99)
)
// BlockValidator is responsible for validating block headers, uncles and
@@ -44,30 +34,24 @@ var (
type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
- Pow pow.PoW // Proof of work used for validating
+ engine consensus.Engine // Consensus engine used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
-func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, pow pow.PoW) *BlockValidator {
+func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
config: config,
- Pow: pow,
+ engine: engine,
bc: blockchain,
}
return validator
}
-// ValidateBlock validates the given block's header and uncles and verifies the
-// the block header's transaction and uncle roots.
-//
-// ValidateBlock does not validate the header's pow. The pow work validated
-// separately so we can process them in parallel.
-//
-// ValidateBlock also validates and makes sure that any previous state (or present)
-// state that might or might not be present is checked to make sure that fast
-// sync has done it's job proper. This prevents the block validator from accepting
-// false positives where a header is present but the state is not.
-func (v *BlockValidator) ValidateBlock(block *types.Block) error {
+// ValidateBody validates the given block's uncles and verifies the block
+// header's transaction and uncle roots. The headers are assumed to be already
+// validated at this point.
+func (v *BlockValidator) ValidateBody(block *types.Block) error {
+ // Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlock(block.Hash()) {
if _, err := state.New(block.Root(), v.bc.chainDb); err == nil {
return &KnownBlockError{block.Number(), block.Hash()}
@@ -80,30 +64,17 @@ func (v *BlockValidator) ValidateBlock(block *types.Block) error {
if _, err := state.New(parent.Root(), v.bc.chainDb); err != nil {
return ParentError(block.ParentHash())
}
-
+ // Header validity is known at this point, check the uncles and transactions
header := block.Header()
- // validate the block header
- if err := ValidateHeader(v.config, v.Pow, header, parent.Header(), false, false); err != nil {
+ if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
- // verify the uncles are correctly rewarded
- if err := v.VerifyUncles(block, parent); err != nil {
- return err
+ if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
+ return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
-
- // Verify UncleHash before running other uncle validations
- unclesSha := types.CalcUncleHash(block.Uncles())
- if unclesSha != header.UncleHash {
- return fmt.Errorf("invalid uncles root hash (remote: %x local: %x)", header.UncleHash, unclesSha)
- }
-
- // The transactions Trie's root (R = (Tr [[i, RLP(T1)], [i, RLP(T2)], ... [n, RLP(Tn)]]))
- // can be used by light clients to make sure they've received the correct Txs
- txSha := types.DeriveSha(block.Transactions())
- if txSha != header.TxHash {
- return fmt.Errorf("invalid transaction root hash (remote: %x local: %x)", header.TxHash, txSha)
+ if hash := types.DeriveSha(block.Transactions()); hash != header.TxHash {
+ return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
-
return nil
}
@@ -135,222 +106,6 @@ func (v *BlockValidator) ValidateState(block, parent *types.Block, statedb *stat
return nil
}
-// VerifyUncles verifies the given block's uncles and applies the Ethereum
-// consensus rules to the various block headers included; it will return an
-// error if any of the included uncle headers were invalid. It returns an error
-// if the validation failed.
-func (v *BlockValidator) VerifyUncles(block, parent *types.Block) error {
- // validate that there are at most 2 uncles included in this block
- if len(block.Uncles()) > 2 {
- return ValidationError("Block can only contain maximum 2 uncles (contained %v)", len(block.Uncles()))
- }
-
- uncles := set.New()
- ancestors := make(map[common.Hash]*types.Block)
- for _, ancestor := range v.bc.GetBlocksFromHash(block.ParentHash(), 7) {
- ancestors[ancestor.Hash()] = ancestor
- // Include ancestors uncles in the uncle set. Uncles must be unique.
- for _, uncle := range ancestor.Uncles() {
- uncles.Add(uncle.Hash())
- }
- }
- ancestors[block.Hash()] = block
- uncles.Add(block.Hash())
-
- for i, uncle := range block.Uncles() {
- hash := uncle.Hash()
- if uncles.Has(hash) {
- // Error not unique
- return UncleError("uncle[%d](%x) not unique", i, hash[:4])
- }
- uncles.Add(hash)
-
- if ancestors[hash] != nil {
- branch := fmt.Sprintf(" O - %x\n |\n", block.Hash())
- for h := range ancestors {
- branch += fmt.Sprintf(" O - %x\n |\n", h)
- }
- log.Warn(branch)
- return UncleError("uncle[%d](%x) is ancestor", i, hash[:4])
- }
-
- if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == parent.Hash() {
- return UncleError("uncle[%d](%x)'s parent is not ancestor (%x)", i, hash[:4], uncle.ParentHash[0:4])
- }
-
- if err := ValidateHeader(v.config, v.Pow, uncle, ancestors[uncle.ParentHash].Header(), true, true); err != nil {
- return ValidationError(fmt.Sprintf("uncle[%d](%x) header invalid: %v", i, hash[:4], err))
- }
- }
-
- return nil
-}
-
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *BlockValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
- // Short circuit if the parent is missing.
- if parent == nil {
- return ParentError(header.ParentHash)
- }
- // Short circuit if the header's already known or its parent is missing
- if v.bc.HasHeader(header.Hash()) {
- return nil
- }
- return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
-}
-
-// Validates a header. Returns an error if the header is invalid.
-//
-// See YP section 4.3.4. "Block Header Validity"
-func ValidateHeader(config *params.ChainConfig, pow pow.PoW, header *types.Header, parent *types.Header, checkPow, uncle bool) error {
- if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
- return fmt.Errorf("Header extra data too long (%d)", len(header.Extra))
- }
-
- if uncle {
- if header.Time.Cmp(math.MaxBig256) == 1 {
- return BlockTSTooBigErr
- }
- } else {
- if header.Time.Cmp(big.NewInt(time.Now().Unix())) == 1 {
- return BlockFutureErr
- }
- }
- if header.Time.Cmp(parent.Time) != 1 {
- return BlockEqualTSErr
- }
-
- expd := CalcDifficulty(config, header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
- if expd.Cmp(header.Difficulty) != 0 {
- return fmt.Errorf("Difficulty check failed for header (remote: %v local: %v)", header.Difficulty, expd)
- }
-
- a := new(big.Int).Set(parent.GasLimit)
- a = a.Sub(a, header.GasLimit)
- a.Abs(a)
- b := new(big.Int).Set(parent.GasLimit)
- b = b.Div(b, params.GasLimitBoundDivisor)
- if !(a.Cmp(b) < 0) || (header.GasLimit.Cmp(params.MinGasLimit) == -1) {
- return fmt.Errorf("GasLimit check failed for header (remote: %v local_max: %v)", header.GasLimit, b)
- }
-
- num := new(big.Int).Set(parent.Number)
- num.Sub(header.Number, num)
- if num.Cmp(big.NewInt(1)) != 0 {
- return BlockNumberErr
- }
-
- if checkPow {
- // Verify the nonce of the header. Return an error if it's not valid
- if err := pow.Verify(types.NewBlockWithHeader(header)); err != nil {
- return &BlockNonceErr{header.Number, header.Hash(), header.Nonce.Uint64()}
- }
- }
- // If all checks passed, validate the extra-data field for hard forks
- if err := ValidateDAOHeaderExtraData(config, header); err != nil {
- return err
- }
- if !uncle && config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
- if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
- return ValidationError("Homestead gas reprice fork hash mismatch: have 0x%x, want 0x%x", header.Hash(), config.EIP150Hash)
- }
- }
- return nil
-}
-
-// CalcDifficulty is the difficulty adjustment algorithm. It returns
-// the difficulty that a new block should have when created at time
-// given the parent block's time and difficulty.
-func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
- return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
- } else {
- return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
- }
-}
-
-func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
- // algorithm:
- // diff = (parent_diff +
- // (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
- // ) + 2^(periodCount - 2)
-
- bigTime := new(big.Int).SetUint64(time)
- bigParentTime := new(big.Int).SetUint64(parentTime)
-
- // holds intermediate values to make the algo easier to read & audit
- x := new(big.Int)
- y := new(big.Int)
-
- // 1 - (block_timestamp -parent_timestamp) // 10
- x.Sub(bigTime, bigParentTime)
- x.Div(x, big10)
- x.Sub(common.Big1, x)
-
- // max(1 - (block_timestamp - parent_timestamp) // 10, -99)))
- if x.Cmp(bigMinus99) < 0 {
- x.Set(bigMinus99)
- }
-
- // (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
- y.Div(parentDiff, params.DifficultyBoundDivisor)
- x.Mul(y, x)
- x.Add(parentDiff, x)
-
- // minimum difficulty can ever be (before exponential factor)
- if x.Cmp(params.MinimumDifficulty) < 0 {
- x.Set(params.MinimumDifficulty)
- }
-
- // for the exponential factor
- periodCount := new(big.Int).Add(parentNumber, common.Big1)
- periodCount.Div(periodCount, ExpDiffPeriod)
-
- // the exponential factor, commonly referred to as "the bomb"
- // diff = diff + 2^(periodCount - 2)
- if periodCount.Cmp(common.Big1) > 0 {
- y.Sub(periodCount, common.Big2)
- y.Exp(common.Big2, y, nil)
- x.Add(x, y)
- }
-
- return x
-}
-
-func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
- diff := new(big.Int)
- adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
- bigTime := new(big.Int)
- bigParentTime := new(big.Int)
-
- bigTime.SetUint64(time)
- bigParentTime.SetUint64(parentTime)
-
- if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
- diff.Add(parentDiff, adjust)
- } else {
- diff.Sub(parentDiff, adjust)
- }
- if diff.Cmp(params.MinimumDifficulty) < 0 {
- diff.Set(params.MinimumDifficulty)
- }
-
- periodCount := new(big.Int).Add(parentNumber, common.Big1)
- periodCount.Div(periodCount, ExpDiffPeriod)
- if periodCount.Cmp(common.Big1) > 0 {
- // diff = diff + 2^(periodCount - 2)
- expDiff := periodCount.Sub(periodCount, common.Big2)
- expDiff.Exp(common.Big2, expDiff, nil)
- diff.Add(diff, expDiff)
- diff = math.BigMax(diff, params.MinimumDifficulty)
- }
-
- return diff
-}
-
// CalcGasLimit computes the gas limit of the next block after parent.
// The result may be modified by the caller.
// This is miner strategy, not consensus protocol.
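ValidateBlock is split in two: seal and header-field verification move behind the consensus engine, while ValidateBody keeps only the known-block, uncle-root and transaction-root checks. A rough sketch of the resulting import-time pairing, using only methods visible in this diff (the helper itself is illustrative):

// importBlockSketch is illustrative, not part of this change.
func importBlockSketch(bc *BlockChain, block *types.Block) error {
	// Header fields and the PoW seal are now the engine's responsibility.
	if err := bc.engine.VerifyHeader(bc, block.Header(), true); err != nil {
		return err
	}
	// Uncle and transaction roots (plus linkability checks) remain with the
	// block validator.
	return bc.Validator().ValidateBody(block)
}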
diff --git a/core/block_validator_test.go b/core/block_validator_test.go
index a07dd9e51b..abe1766b4f 100644
--- a/core/block_validator_test.go
+++ b/core/block_validator_test.go
@@ -17,64 +17,179 @@
package core
import (
- "math/big"
+ "runtime"
"testing"
+ "time"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
-func testGenesis(account common.Address, balance *big.Int) *Genesis {
- return &Genesis{
- Config: params.TestChainConfig,
- Alloc: GenesisAlloc{account: {Balance: balance}},
+// Tests that simple header verification works, for both good and bad blocks.
+func TestHeaderVerification(t *testing.T) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ }
+ // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+
+ for i := 0; i < len(blocks); i++ {
+ for j, valid := range []bool{true, false} {
+ var results <-chan error
+
+ if valid {
+ engine := ethash.NewFaker()
+ _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+ } else {
+ engine := ethash.NewFakeFailer(headers[i].Number.Uint64())
+ _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}, []bool{true})
+ }
+ // Wait for the verification result
+ select {
+ case result := <-results:
+ if (result == nil) != valid {
+ t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, result, valid)
+ }
+ case <-time.After(time.Second):
+ t.Fatalf("test %d.%d: verification timeout", i, j)
+ }
+ // Make sure no more data is returned
+ select {
+ case result := <-results:
+ t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
+ case <-time.After(25 * time.Millisecond):
+ }
+ }
+ chain.InsertChain(blocks[i : i+1])
}
}
-func TestNumber(t *testing.T) {
- chain := newTestBlockChain()
- statedb, _ := state.New(chain.Genesis().Root(), chain.chainDb)
- header := makeHeader(chain.config, chain.Genesis(), statedb)
- header.Number = big.NewInt(3)
- err := ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
- if err != BlockNumberErr {
- t.Errorf("expected block number error, got %q", err)
+// Tests that concurrent header verification works, for both good and bad blocks.
+func TestHeaderConcurrentVerification2(t *testing.T) { testHeaderConcurrentVerification(t, 2) }
+func TestHeaderConcurrentVerification8(t *testing.T) { testHeaderConcurrentVerification(t, 8) }
+func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) }
+
+func testHeaderConcurrentVerification(t *testing.T, threads int) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ seals := make([]bool, len(blocks))
+
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ seals[i] = true
}
+ // Set the number of threads to verify on
+ old := runtime.GOMAXPROCS(threads)
+ defer runtime.GOMAXPROCS(old)
+
+ // Run the header checker for the entire block chain at once both for a valid and
+ // also an invalid chain (enough if one arbitrary block is invalid).
+ for i, valid := range []bool{true, false} {
+ var results <-chan error
+
+ if valid {
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
+ _, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ } else {
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeFailer(uint64(len(headers)-1)), new(event.TypeMux), vm.Config{})
+ _, results = chain.engine.VerifyHeaders(chain, headers, seals)
+ }
+ // Wait for all the verification results
+ checks := make(map[int]error)
+ for j := 0; j < len(blocks); j++ {
+ select {
+ case result := <-results:
+ checks[j] = result
- header = makeHeader(chain.config, chain.Genesis(), statedb)
- err = ValidateHeader(chain.config, pow.FakePow{}, header, chain.Genesis().Header(), false, false)
- if err == BlockNumberErr {
- t.Errorf("didn't expect block number error")
+ case <-time.After(time.Second):
+ t.Fatalf("test %d.%d: verification timeout", i, j)
+ }
+ }
+ // Check nonce check validity
+ for j := 0; j < len(blocks); j++ {
+ want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail
+ if (checks[j] == nil) != want {
+ t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want)
+ }
+ if !want {
+ // A few blocks after the first error may pass verification due to concurrent
+ // workers. We don't care about those in this test, just that the correct block
+ // errors out.
+ break
+ }
+ }
+ // Make sure no more data is returned
+ select {
+ case result := <-results:
+ t.Fatalf("test %d: unexpected result returned: %v", i, result)
+ case <-time.After(25 * time.Millisecond):
+ }
}
}
-func TestPutReceipt(t *testing.T) {
- db, _ := ethdb.NewMemDatabase()
-
- var addr common.Address
- addr[0] = 1
- var hash common.Hash
- hash[0] = 2
-
- receipt := new(types.Receipt)
- receipt.Logs = []*types.Log{{
- Address: addr,
- Topics: []common.Hash{hash},
- Data: []byte("hi"),
- BlockNumber: 42,
- TxHash: hash,
- TxIndex: 0,
- BlockHash: hash,
- Index: 0,
- }}
-
- WriteReceipts(db, types.Receipts{receipt})
- receipt = GetReceipt(db, common.Hash{})
- if receipt == nil {
- t.Error("expected to get 1 receipt, got none.")
+// Tests that aborting a header validation indeed prevents further checks from being
+// run, as well as checks that no left-over goroutines are leaked.
+func TestHeaderConcurrentAbortion2(t *testing.T) { testHeaderConcurrentAbortion(t, 2) }
+func TestHeaderConcurrentAbortion8(t *testing.T) { testHeaderConcurrentAbortion(t, 8) }
+func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) }
+
+func testHeaderConcurrentAbortion(t *testing.T, threads int) {
+ // Create a simple chain to verify
+ var (
+ testdb, _ = ethdb.NewMemDatabase()
+ gspec = &Genesis{Config: params.TestChainConfig}
+ genesis = gspec.MustCommit(testdb)
+ blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
+ )
+ headers := make([]*types.Header, len(blocks))
+ seals := make([]bool, len(blocks))
+
+ for i, block := range blocks {
+ headers[i] = block.Header()
+ seals[i] = true
+ }
+ // Set the number of threads to verify on
+ old := runtime.GOMAXPROCS(threads)
+ defer runtime.GOMAXPROCS(old)
+
+ // Start the verifications and immediately abort
+ chain, _ := NewBlockChain(testdb, params.TestChainConfig, ethash.NewFakeDelayer(time.Millisecond), new(event.TypeMux), vm.Config{})
+ abort, results := chain.engine.VerifyHeaders(chain, headers, seals)
+ close(abort)
+
+ // Deplete the results channel
+ verified := 0
+ for depleted := false; !depleted; {
+ select {
+ case result := <-results:
+ if result != nil {
+ t.Errorf("header %d: validation failed: %v", verified, result)
+ }
+ verified++
+ case <-time.After(50 * time.Millisecond):
+ depleted = true
+ }
+ }
+ // Check that abortion was honored by not processing too many POWs
+ if verified > 2*threads {
+ t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads)
}
}
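The tests above lean on the two-channel contract of VerifyHeaders: the engine delivers exactly one result per input header, in order, and stops early once the abort channel is closed. A condensed consumer sketch, assuming the *BlockChain receiver the tests use:

// verifyAllSketch is illustrative; error handling is reduced to the essentials.
func verifyAllSketch(bc *BlockChain, headers []*types.Header) (int, error) {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true // ask for the PoW seal to be checked on every header
	}
	abort, results := bc.engine.VerifyHeaders(bc, headers, seals)
	defer close(abort) // cancel any workers still running when we return

	for i := range headers {
		if err := <-results; err != nil {
			return i, err // index of the first header that failed
		}
	}
	return len(headers), nil
}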
diff --git a/core/blockchain.go b/core/blockchain.go
index a57832df02..4793431d8e 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -30,6 +30,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/hashicorp/golang-lru"
@@ -104,7 +104,7 @@ type BlockChain struct {
procInterrupt int32 // interrupt signaler for block processing
wg sync.WaitGroup // chain processing wait group for shutting down
- pow pow.PoW
+ engine consensus.Engine
processor Processor // block processor interface
validator Validator // block and state validator interface
vmConfig vm.Config
@@ -115,7 +115,7 @@ type BlockChain struct {
// NewBlockChain returns a fully initialised block chain using information
// available in the database. It initialises the default Ethereum Validator and
// Processor.
-func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.PoW, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
+func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux, vmConfig vm.Config) (*BlockChain, error) {
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
@@ -131,25 +131,22 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
futureBlocks: futureBlocks,
- pow: pow,
+ engine: engine,
vmConfig: vmConfig,
badBlocks: badBlocks,
}
- bc.SetValidator(NewBlockValidator(config, bc, pow))
- bc.SetProcessor(NewStateProcessor(config, bc))
+ bc.SetValidator(NewBlockValidator(config, bc, engine))
+ bc.SetProcessor(NewStateProcessor(config, bc, engine))
- gv := func() HeaderValidator { return bc.Validator() }
var err error
- bc.hc, err = NewHeaderChain(chainDb, config, gv, bc.getProcInterrupt)
+ bc.hc, err = NewHeaderChain(chainDb, config, engine, bc.getProcInterrupt)
if err != nil {
return nil, err
}
-
bc.genesisBlock = bc.GetBlockByNumber(0)
if bc.genesisBlock == nil {
return nil, ErrNoGenesis
}
-
if err := bc.loadLastState(); err != nil {
return nil, err
}
@@ -233,9 +230,6 @@ func (self *BlockChain) loadLastState() error {
log.Info("Loaded most recent local full block", "number", self.currentBlock.Number(), "hash", self.currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", self.currentFastBlock.Number(), "hash", self.currentFastBlock.Hash(), "td", fastTd)
- // Try to be smart and issue a pow verification for the head to pre-generate caches
- go self.pow.Verify(types.NewBlockWithHeader(currentHeader))
-
return nil
}
@@ -383,9 +377,6 @@ func (self *BlockChain) Processor() Processor {
return self.processor
}
-// AuxValidator returns the auxiliary validator (Proof of work atm)
-func (self *BlockChain) AuxValidator() pow.PoW { return self.pow }
-
// State returns a new mutable state based on the current HEAD block.
func (self *BlockChain) State() (*state.StateDB, error) {
return self.StateAt(self.CurrentBlock().Root())
@@ -906,38 +897,38 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
coalescedLogs []*types.Log
- nonceChecked = make([]bool, len(chain))
)
+ // Start the parallel header verifier
+ headers := make([]*types.Header, len(chain))
+ seals := make([]bool, len(chain))
- // Start the parallel nonce verifier.
- nonceAbort, nonceResults := verifyNoncesFromBlocks(self.pow, chain)
- defer close(nonceAbort)
+ for i, block := range chain {
+ headers[i] = block.Header()
+ seals[i] = true
+ }
+ abort, results := self.engine.VerifyHeaders(self, headers, seals)
+ defer close(abort)
+ // Iterate over the blocks and insert when the verifier permits
for i, block := range chain {
+ // If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&self.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
break
}
- bstart := time.Now()
- // Wait for block i's nonce to be verified before processing
- // its state transition.
- for !nonceChecked[i] {
- r := <-nonceResults
- nonceChecked[r.index] = true
- if !r.valid {
- invalid := chain[r.index]
- return r.index, &BlockNonceErr{Hash: invalid.Hash(), Number: invalid.Number(), Nonce: invalid.Nonce()}
- }
- }
-
+ // If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
err := BadHashError(block.Hash())
self.reportBlock(block, nil, err)
return i, err
}
- // Stage 1 validation of the block using the chain's validator
- // interface.
- err := self.Validator().ValidateBlock(block)
+ // Wait for the block's verification to complete
+ bstart := time.Now()
+
+ err := <-results
+ if err == nil {
+ err = self.Validator().ValidateBody(block)
+ }
if err != nil {
if IsKnownBlockErr(err) {
stats.ignored++
@@ -952,7 +943,6 @@ func (self *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if block.Time().Cmp(max) == 1 {
return i, fmt.Errorf("%v: BlockFutureErr, %v > %v", BlockFutureErr, block.Time(), max)
}
-
self.futureBlocks.Add(block.Hash(), block)
stats.queued++
continue
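For reference, the consensus.Engine surface implied by the call sites in this file; the real interface lives in the consensus package (not shown in this section) and may be wider and take an abstract chain-reader type rather than *BlockChain:

// engineSketch is reconstructed from usage only; treat it as an approximation.
type engineSketch interface {
	// VerifyHeader checks one header, optionally including its seal (PoW).
	VerifyHeader(chain *BlockChain, header *types.Header, seal bool) error
	// VerifyHeaders checks a batch concurrently, returning an abort channel
	// and a results channel carrying one entry per input header.
	VerifyHeaders(chain *BlockChain, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
	// VerifyUncles validates a block's uncles against the consensus rules.
	VerifyUncles(chain *BlockChain, block *types.Block) error
}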
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index 3da133a9ea..b2fb226dbb 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -31,18 +32,21 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// newTestBlockChain creates a blockchain without validation.
-func newTestBlockChain() *BlockChain {
+func newTestBlockChain(fake bool) *BlockChain {
db, _ := ethdb.NewMemDatabase()
gspec := &Genesis{
Config: params.TestChainConfig,
Difficulty: big.NewInt(1),
}
gspec.MustCommit(db)
- blockchain, err := NewBlockChain(db, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ engine := ethash.NewFullFaker()
+ if !fake {
+ engine = ethash.NewTester()
+ }
+ blockchain, err := NewBlockChain(db, gspec.Config, engine, new(event.TypeMux), vm.Config{})
if err != nil {
panic(err)
}
@@ -117,7 +121,10 @@ func printChain(bc *BlockChain) {
func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
for _, block := range chain {
// Try and process the block
- err := blockchain.Validator().ValidateBlock(block)
+ err := blockchain.engine.VerifyHeader(blockchain, block.Header(), true)
+ if err == nil {
+ err = blockchain.validator.ValidateBody(block)
+ }
if err != nil {
if IsKnownBlockErr(err) {
continue
@@ -133,7 +140,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.reportBlock(block, receipts, err)
return err
}
- err = blockchain.Validator().ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
+ err = blockchain.validator.ValidateState(block, blockchain.GetBlockByHash(block.ParentHash()), statedb, receipts, usedGas)
if err != nil {
blockchain.reportBlock(block, receipts, err)
return err
@@ -152,7 +159,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
for _, header := range chain {
// Try and validate the header
- if err := blockchain.Validator().ValidateHeader(header, blockchain.GetHeaderByHash(header.ParentHash), false); err != nil {
+ if err := blockchain.engine.VerifyHeader(blockchain, header, false); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganise (allows subsequent testing)
@@ -174,7 +181,7 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
}
func TestLastBlock(t *testing.T) {
- bchain := newTestBlockChain()
+ bchain := newTestBlockChain(false)
block := makeBlockChain(bchain.CurrentBlock(), 1, bchain.chainDb, 0)[0]
bchain.insert(block)
if block.Hash() != GetHeadBlockHash(bchain.chainDb) {
@@ -318,8 +325,7 @@ func testBrokenChain(t *testing.T, full bool) {
type bproc struct{}
-func (bproc) ValidateBlock(*types.Block) error { return nil }
-func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
+func (bproc) ValidateBody(*types.Block) error { return nil }
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
return nil
}
@@ -378,7 +384,7 @@ func testReorgShort(t *testing.T, full bool) {
}
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Insert an easy and a difficult chain afterwards
if full {
@@ -422,7 +428,7 @@ func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
func testBadHashes(t *testing.T, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Create a chain, ban a hash and try to import
var err error
@@ -446,7 +452,7 @@ func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
func testReorgBadHashes(t *testing.T, full bool) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(true)
// Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
@@ -473,7 +479,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
}
// Create a new BlockChain and check that it rolled back the state.
- ncm, err := NewBlockChain(bc.chainDb, bc.config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ ncm, err := NewBlockChain(bc.chainDb, bc.config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
@@ -504,46 +510,34 @@ func testInsertNonceError(t *testing.T, full bool) {
}
// Create and insert a chain with a failing nonce
var (
- failAt int
- failRes int
- failNum uint64
- failHash common.Hash
+ failAt int
+ failRes int
+ failNum uint64
)
if full {
blocks := makeBlockChain(blockchain.CurrentBlock(), i, db, 0)
failAt = rand.Int() % len(blocks)
failNum = blocks[failAt].NumberU64()
- failHash = blocks[failAt].Hash()
-
- blockchain.pow = failPow{failNum}
+ blockchain.engine = ethash.NewFakeFailer(failNum)
failRes, err = blockchain.InsertChain(blocks)
} else {
headers := makeHeaderChain(blockchain.CurrentHeader(), i, db, 0)
failAt = rand.Int() % len(headers)
failNum = headers[failAt].Number.Uint64()
- failHash = headers[failAt].Hash()
-
- blockchain.pow = failPow{failNum}
- blockchain.validator = NewBlockValidator(params.TestChainConfig, blockchain, failPow{failNum})
+ blockchain.engine = ethash.NewFakeFailer(failNum)
+ blockchain.hc.engine = blockchain.engine
failRes, err = blockchain.InsertHeaderChain(headers, 1)
}
// Check that the returned error indicates the nonce failure.
if failRes != failAt {
t.Errorf("test %d: failure index mismatch: have %d, want %d", i, failRes, failAt)
}
- if !IsBlockNonceErr(err) {
- t.Fatalf("test %d: error mismatch: have %v, want nonce error %T", i, err, err)
- }
- nerr := err.(*BlockNonceErr)
- if nerr.Number.Uint64() != failNum {
- t.Errorf("test %d: number mismatch: have %v, want %v", i, nerr.Number, failNum)
- }
- if nerr.Hash != failHash {
- t.Errorf("test %d: hash mismatch: have %x, want %x", i, nerr.Hash[:4], failHash[:4])
+ if err != ethash.ErrInvalidPoW {
+ t.Fatalf("test %d: error mismatch: have %v, want %v", i, err, ethash.ErrInvalidPoW)
}
// Check that no blocks after the failing block have been inserted.
for j := 0; j < i-failAt; j++ {
@@ -569,9 +563,12 @@ func TestFastVsFullChains(t *testing.T) {
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
address = crypto.PubkeyToAddress(key.PublicKey)
funds = big.NewInt(1000000000)
- gspec = testGenesis(address, funds)
- genesis = gspec.MustCommit(gendb)
- signer = types.NewEIP155Signer(gspec.Config.ChainId)
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{address: {Balance: funds}},
+ }
+ genesis = gspec.MustCommit(gendb)
+ signer = types.NewEIP155Signer(gspec.Config.ChainId)
)
blocks, receipts := GenerateChain(gspec.Config, genesis, gendb, 1024, func(i int, block *BlockGen) {
block.SetCoinbase(common.Address{0x00})
@@ -594,7 +591,7 @@ func TestFastVsFullChains(t *testing.T) {
// Import the chain as an archive node for the comparison baseline
archiveDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
@@ -603,7 +600,7 @@ func TestFastVsFullChains(t *testing.T) {
// Fast import the chain as a non-archive node to test
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -680,8 +677,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
archiveDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(archiveDb)
- archive, _ := NewBlockChain(archiveDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
-
+ archive, _ := NewBlockChain(archiveDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := archive.InsertChain(blocks); err != nil {
t.Fatalf("failed to process block %d: %v", n, err)
}
@@ -692,7 +688,7 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a non-archive node and ensure all pointers are updated
fastDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(fastDb)
- fast, _ := NewBlockChain(fastDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ fast, _ := NewBlockChain(fastDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
@@ -711,8 +707,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
// Import the chain as a light node and ensure all pointers are updated
lightDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(lightDb)
- light, _ := NewBlockChain(lightDb, gspec.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ light, _ := NewBlockChain(lightDb, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if n, err := light.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to insert header %d: %v", n, err)
}
@@ -780,7 +776,7 @@ func TestChainTxReorgs(t *testing.T) {
})
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
if i, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert original chain[%d]: %v", i, err)
}
@@ -851,7 +847,7 @@ func TestLogReorgs(t *testing.T) {
)
var evmux event.TypeMux
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &evmux, vm.Config{})
subs := evmux.Subscribe(RemovedLogsEvent{})
chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 2, func(i int, gen *BlockGen) {
@@ -883,13 +879,16 @@ func TestReorgSideEvent(t *testing.T) {
db, _ = ethdb.NewMemDatabase()
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- gspec = testGenesis(addr1, big.NewInt(10000000000000))
+ gspec = &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}},
+ }
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainId)
)
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
chain, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
@@ -959,7 +958,7 @@ done:
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
- bc := newTestBlockChain()
+ bc := newTestBlockChain(false)
chain, _ := GenerateChain(bc.config, bc.genesisBlock, bc.chainDb, 10, func(i int, gen *BlockGen) {})
for i := range chain {
@@ -1004,7 +1003,7 @@ func TestEIP155Transition(t *testing.T) {
mux event.TypeMux
)
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
blocks, _ := GenerateChain(gspec.Config, genesis, db, 4, func(i int, block *BlockGen) {
var (
tx *types.Transaction
@@ -1110,7 +1109,7 @@ func TestEIP161AccountRemoval(t *testing.T) {
}
genesis = gspec.MustCommit(db)
mux event.TypeMux
- blockchain, _ = NewBlockChain(db, gspec.Config, pow.FakePow{}, &mux, vm.Config{})
+ blockchain, _ = NewBlockChain(db, gspec.Config, ethash.NewFaker(), &mux, vm.Config{})
)
blocks, _ := GenerateChain(gspec.Config, genesis, db, 3, func(i int, block *BlockGen) {
var (
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5bf1ece25b..c47c719f64 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -21,13 +21,14 @@ import (
"math/big"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// So we can deterministically seed different blockchains
@@ -141,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
- b.header.Difficulty = CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
+ b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
}
// GenerateChain creates a chain of n blocks. The first block's
@@ -173,13 +174,13 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, db ethdb.Dat
}
}
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(h.Number) == 0 {
- ApplyDAOHardFork(statedb)
+ misc.ApplyDAOHardFork(statedb)
}
// Execute any user modifications to the block and finalize it
if gen != nil {
gen(i, b)
}
- AccumulateRewards(statedb, h, b.uncles)
+ ethash.AccumulateRewards(statedb, h, b.uncles)
root, err := statedb.Commit(config.IsEIP158(h.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
@@ -212,7 +213,7 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
Root: state.IntermediateRoot(config.IsEIP158(parent.Number())),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
- Difficulty: CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
+ Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
@@ -229,7 +230,7 @@ func newCanonical(n int, full bool) (ethdb.Database, *BlockChain, error) {
db, _ := ethdb.NewMemDatabase()
genesis := gspec.MustCommit(db)
- blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ blockchain, _ := NewBlockChain(db, params.AllProtocolChanges, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
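The difficulty algorithm now lives in the ethash package (the core-side copy is deleted above). Assuming the signature used in this file, deriving a child header's difficulty looks like the following sketch, where childTime is a hypothetical timestamp:

// childDifficultySketch is illustrative only.
func childDifficultySketch(config *params.ChainConfig, parent *types.Block, childTime uint64) *big.Int {
	return ethash.CalcDifficulty(config, childTime, parent.Time().Uint64(), parent.Number(), parent.Difficulty())
}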
diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go
index a8f77cdf3e..3a7c62396f 100644
--- a/core/chain_makers_test.go
+++ b/core/chain_makers_test.go
@@ -20,13 +20,13 @@ import (
"fmt"
"math/big"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func ExampleGenerateChain() {
@@ -81,7 +81,7 @@ func ExampleGenerateChain() {
// Import the chain. This runs all block validation rules.
evmux := &event.TypeMux{}
- blockchain, _ := NewBlockChain(db, gspec.Config, pow.FakePow{}, evmux, vm.Config{})
+ blockchain, _ := NewBlockChain(db, gspec.Config, ethash.NewFaker(), evmux, vm.Config{})
if i, err := blockchain.InsertChain(chain); err != nil {
fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err)
return
diff --git a/core/chain_pow.go b/core/chain_pow.go
deleted file mode 100644
index e5ccd87e20..0000000000
--- a/core/chain_pow.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "runtime"
-
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/pow"
-)
-
-// nonceCheckResult contains the result of a nonce verification.
-type nonceCheckResult struct {
- index int // Index of the item verified from an input array
- valid bool // Result of the nonce verification
-}
-
-// verifyNoncesFromHeaders starts a concurrent header nonce verification,
-// returning a quit channel to abort the operations and a results channel
-// to retrieve the async verifications.
-func verifyNoncesFromHeaders(checker pow.PoW, headers []*types.Header) (chan<- struct{}, <-chan nonceCheckResult) {
- items := make([]pow.Block, len(headers))
- for i, header := range headers {
- items[i] = types.NewBlockWithHeader(header)
- }
- return verifyNonces(checker, items)
-}
-
-// verifyNoncesFromBlocks starts a concurrent block nonce verification,
-// returning a quit channel to abort the operations and a results channel
-// to retrieve the async verifications.
-func verifyNoncesFromBlocks(checker pow.PoW, blocks []*types.Block) (chan<- struct{}, <-chan nonceCheckResult) {
- items := make([]pow.Block, len(blocks))
- for i, block := range blocks {
- items[i] = block
- }
- return verifyNonces(checker, items)
-}
-
-// verifyNonces starts a concurrent nonce verification, returning a quit channel
-// to abort the operations and a results channel to retrieve the async checks.
-func verifyNonces(checker pow.PoW, items []pow.Block) (chan<- struct{}, <-chan nonceCheckResult) {
- // Spawn as many workers as allowed threads
- workers := runtime.GOMAXPROCS(0)
- if len(items) < workers {
- workers = len(items)
- }
- // Create a task channel and spawn the verifiers
- tasks := make(chan int, workers)
- results := make(chan nonceCheckResult, len(items)) // Buffered to make sure all workers stop
- for i := 0; i < workers; i++ {
- go func() {
- for index := range tasks {
- results <- nonceCheckResult{index: index, valid: checker.Verify(items[index]) == nil}
- }
- }()
- }
- // Feed item indices to the workers until done or aborted
- abort := make(chan struct{})
- go func() {
- defer close(tasks)
-
- for i := range items {
- select {
- case tasks <- i:
- continue
- case <-abort:
- return
- }
- }
- }()
- return abort, results
-}
diff --git a/core/chain_pow_test.go b/core/chain_pow_test.go
deleted file mode 100644
index 311ca128e6..0000000000
--- a/core/chain_pow_test.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package core
-
-import (
- "errors"
- "math/big"
- "runtime"
- "testing"
- "time"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
-)
-
-// failPow is a non-validating proof of work implementation, that returns true
-// from Verify for all but one block.
-type failPow struct {
- failing uint64
-}
-
-func (pow failPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
- return 0, nil
-}
-func (pow failPow) Verify(block pow.Block) error {
- if block.NumberU64() == pow.failing {
- return errors.New("failed")
- }
- return nil
-}
-func (pow failPow) Hashrate() float64 { return 0 }
-
-// delayedPow is a non-validating proof of work implementation, that returns true
-// from Verify for all blocks, but delays them the configured amount of time.
-type delayedPow struct {
- delay time.Duration
-}
-
-func (pow delayedPow) Search(pow.Block, <-chan struct{}) (uint64, []byte) {
- return 0, nil
-}
-func (pow delayedPow) Verify(block pow.Block) error { time.Sleep(pow.delay); return nil }
-func (pow delayedPow) Hashrate() float64 { return 0 }
-
-// Tests that simple POW verification works, for both good and bad blocks.
-func TestPowVerification(t *testing.T) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Run the POW checker for blocks one-by-one, checking for both valid and invalid nonces
- for i := 0; i < len(blocks); i++ {
- for j, full := range []bool{true, false} {
- for k, valid := range []bool{true, false} {
- var results <-chan nonceCheckResult
-
- switch {
- case full && valid:
- _, results = verifyNoncesFromBlocks(pow.FakePow{}, []*types.Block{blocks[i]})
- case full && !valid:
- _, results = verifyNoncesFromBlocks(failPow{blocks[i].NumberU64()}, []*types.Block{blocks[i]})
- case !full && valid:
- _, results = verifyNoncesFromHeaders(pow.FakePow{}, []*types.Header{headers[i]})
- case !full && !valid:
- _, results = verifyNoncesFromHeaders(failPow{headers[i].Number.Uint64()}, []*types.Header{headers[i]})
- }
- // Wait for the verification result
- select {
- case result := <-results:
- if result.index != 0 {
- t.Errorf("test %d.%d.%d: invalid index: have %d, want 0", i, j, k, result.index)
- }
- if result.valid != valid {
- t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, result.valid, valid)
- }
- case <-time.After(time.Second):
- t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("test %d.%d.%d: unexpected result returned: %v", i, j, k, result)
- case <-time.After(25 * time.Millisecond):
- }
- }
- }
- }
-}
-
-// Tests that concurrent POW verification works, for both good and bad blocks.
-func TestPowConcurrentVerification2(t *testing.T) { testPowConcurrentVerification(t, 2) }
-func TestPowConcurrentVerification8(t *testing.T) { testPowConcurrentVerification(t, 8) }
-func TestPowConcurrentVerification32(t *testing.T) { testPowConcurrentVerification(t, 32) }
-
-func testPowConcurrentVerification(t *testing.T, threads int) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 8, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Set the number of threads to verify on
- old := runtime.GOMAXPROCS(threads)
- defer runtime.GOMAXPROCS(old)
-
- // Run the POW checker for the entire block chain at once both for a valid and
- // also an invalid chain (enough if one is invalid, last but one (arbitrary)).
- for i, full := range []bool{true, false} {
- for j, valid := range []bool{true, false} {
- var results <-chan nonceCheckResult
-
- switch {
- case full && valid:
- _, results = verifyNoncesFromBlocks(pow.FakePow{}, blocks)
- case full && !valid:
- _, results = verifyNoncesFromBlocks(failPow{uint64(len(blocks) - 1)}, blocks)
- case !full && valid:
- _, results = verifyNoncesFromHeaders(pow.FakePow{}, headers)
- case !full && !valid:
- _, results = verifyNoncesFromHeaders(failPow{uint64(len(headers) - 1)}, headers)
- }
- // Wait for all the verification results
- checks := make(map[int]bool)
- for k := 0; k < len(blocks); k++ {
- select {
- case result := <-results:
- if _, ok := checks[result.index]; ok {
- t.Fatalf("test %d.%d.%d: duplicate results for %d", i, j, k, result.index)
- }
- if result.index < 0 || result.index >= len(blocks) {
- t.Fatalf("test %d.%d.%d: result %d out of bounds [%d, %d]", i, j, k, result.index, 0, len(blocks)-1)
- }
- checks[result.index] = result.valid
-
- case <-time.After(time.Second):
- t.Fatalf("test %d.%d.%d: verification timeout", i, j, k)
- }
- }
- // Check nonce check validity
- for k := 0; k < len(blocks); k++ {
- want := valid || (k != len(blocks)-2) // We chose the last but one nonce in the chain to fail
- if checks[k] != want {
- t.Errorf("test %d.%d.%d: validity mismatch: have %v, want %v", i, j, k, checks[k], want)
- }
- }
- // Make sure no more data is returned
- select {
- case result := <-results:
- t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result)
- case <-time.After(25 * time.Millisecond):
- }
- }
- }
-}
-
-// Tests that aborting a POW validation indeed prevents further checks from being
-// run, as well as checks that no left-over goroutines are leaked.
-func TestPowConcurrentAbortion2(t *testing.T) { testPowConcurrentAbortion(t, 2) }
-func TestPowConcurrentAbortion8(t *testing.T) { testPowConcurrentAbortion(t, 8) }
-func TestPowConcurrentAbortion32(t *testing.T) { testPowConcurrentAbortion(t, 32) }
-
-func testPowConcurrentAbortion(t *testing.T, threads int) {
- // Create a simple chain to verify
- var (
- testdb, _ = ethdb.NewMemDatabase()
- genesis = GenesisBlockForTesting(testdb, common.Address{}, new(big.Int))
- blocks, _ = GenerateChain(params.TestChainConfig, genesis, testdb, 1024, nil)
- )
- headers := make([]*types.Header, len(blocks))
- for i, block := range blocks {
- headers[i] = block.Header()
- }
- // Set the number of threads to verify on
- old := runtime.GOMAXPROCS(threads)
- defer runtime.GOMAXPROCS(old)
-
- // Run the POW checker for the entire block chain at once
- for i, full := range []bool{true, false} {
- var abort chan<- struct{}
- var results <-chan nonceCheckResult
-
- // Start the verifications and immediately abort
- if full {
- abort, results = verifyNoncesFromBlocks(delayedPow{time.Millisecond}, blocks)
- } else {
- abort, results = verifyNoncesFromHeaders(delayedPow{time.Millisecond}, headers)
- }
- close(abort)
-
- // Deplete the results channel
- verified := make(map[int]struct{})
- for depleted := false; !depleted; {
- select {
- case result := <-results:
- verified[result.index] = struct{}{}
- case <-time.After(50 * time.Millisecond):
- depleted = true
- }
- }
- // Check that abortion was honored by not processing too many POWs
- if len(verified) > 2*threads {
- t.Errorf("test %d: verification count too large: have %d, want below %d", i, len(verified), 2*threads)
- }
- // Check that there are no gaps in the results
- for j := 0; j < len(verified); j++ {
- if _, ok := verified[j]; !ok {
- t.Errorf("test %d.%d: gap found in verification results", i, j)
- }
- }
- }
-}
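The hand-rolled failPow/delayedPow helpers deleted here are superseded by ethash's built-in test engines, whose constructors appear throughout the updated tests (NewFaker, NewFullFaker, NewTester, NewFakeFailer, NewFakeDelayer). A sketch of how a test now selects one; the simulate* flags are hypothetical:

engine := ethash.NewFaker() // accepts every header and seal
if simulateInvalidPoW {
	engine = ethash.NewFakeFailer(failNum) // rejects the header at number failNum
}
if simulateSlowPoW {
	engine = ethash.NewFakeDelayer(time.Millisecond) // delays each verification
}
chain, _ := NewBlockChain(db, params.TestChainConfig, engine, new(event.TypeMux), vm.Config{})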
diff --git a/core/dao_test.go b/core/dao_test.go
index c0d4826303..cb6e54f8f2 100644
--- a/core/dao_test.go
+++ b/core/dao_test.go
@@ -20,11 +20,11 @@ import (
"math/big"
"testing"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// Tests that DAO-fork enabled clients can properly filter out fork-commencing
@@ -42,12 +42,12 @@ func TestDAOForkRangeExtradata(t *testing.T) {
proDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(proDb)
proConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: true}
- proBc, _ := NewBlockChain(proDb, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ proBc, _ := NewBlockChain(proDb, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
conDb, _ := ethdb.NewMemDatabase()
gspec.MustCommit(conDb)
conConf := &params.ChainConfig{HomesteadBlock: big.NewInt(0), DAOForkBlock: forkBlock, DAOForkSupport: false}
- conBc, _ := NewBlockChain(conDb, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ conBc, _ := NewBlockChain(conDb, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
if _, err := proBc.InsertChain(prefix); err != nil {
t.Fatalf("pro-fork: failed to import chain prefix: %v", err)
@@ -60,7 +60,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a pro-fork block, and try to feed into the no-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -81,7 +81,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Create a no-fork block, and try to feed into the pro-fork chain
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -103,7 +103,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that contra-forkers accept pro-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ := NewBlockChain(db, conConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, conConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
@@ -119,7 +119,7 @@ func TestDAOForkRangeExtradata(t *testing.T) {
// Verify that pro-forkers accept contra-fork extra-datas after forking finishes
db, _ = ethdb.NewMemDatabase()
gspec.MustCommit(db)
- bc, _ = NewBlockChain(db, proConf, new(pow.FakePow), new(event.TypeMux), vm.Config{})
+ bc, _ = NewBlockChain(db, proConf, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64()+1))
for j := 0; j < len(blocks)/2; j++ {
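Every test file touched below follows the same substitution as core/dao_test.go above: a pow.FakePow value becomes an ethash fake engine. A minimal sketch of the resulting setup, using only the APIs visible in this change (hypothetical helper, for orientation only):

package chaintest // hypothetical package, illustration only

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

// newFakerChain builds a throwaway in-memory chain whose seals are never
// verified, mirroring the ethash.NewFaker() swaps in the tests above.
func newFakerChain() (*core.BlockChain, error) {
	db, _ := ethdb.NewMemDatabase()
	gspec := &core.Genesis{Config: params.TestChainConfig}
	gspec.MustCommit(db)
	return core.NewBlockChain(db, gspec.Config, ethash.NewFaker(), new(event.TypeMux), vm.Config{})
}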
diff --git a/core/database_util_test.go b/core/database_util_test.go
index e6be060930..9f16b660a9 100644
--- a/core/database_util_test.go
+++ b/core/database_util_test.go
@@ -18,14 +18,12 @@ package core
import (
"bytes"
- "encoding/json"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
@@ -34,58 +32,6 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
-type diffTest struct {
- ParentTimestamp uint64
- ParentDifficulty *big.Int
- CurrentTimestamp uint64
- CurrentBlocknumber *big.Int
- CurrentDifficulty *big.Int
-}
-
-func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
- var ext struct {
- ParentTimestamp string
- ParentDifficulty string
- CurrentTimestamp string
- CurrentBlocknumber string
- CurrentDifficulty string
- }
- if err := json.Unmarshal(b, &ext); err != nil {
- return err
- }
-
- d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
- d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
- d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
- d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
- d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
-
- return nil
-}
-
-func TestCalcDifficulty(t *testing.T) {
- file, err := os.Open("../tests/files/BasicTests/difficulty.json")
- if err != nil {
- t.Fatal(err)
- }
- defer file.Close()
-
- tests := make(map[string]diffTest)
- err = json.NewDecoder(file).Decode(&tests)
- if err != nil {
- t.Fatal(err)
- }
-
- config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
- for name, test := range tests {
- number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
- diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
- if diff.Cmp(test.CurrentDifficulty) != 0 {
- t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
- }
- }
-}
-
// Tests block header storage and retrieval operations.
func TestHeaderStorage(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
@@ -562,7 +508,11 @@ func TestMipmapChain(t *testing.T) {
)
defer db.Close()
- genesis := testGenesis(addr, big.NewInt(1000000)).MustCommit(db)
+ gspec := &Genesis{
+ Config: params.TestChainConfig,
+ Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
+ }
+ genesis := gspec.MustCommit(db)
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
var receipts types.Receipts
switch i {
diff --git a/core/genesis_test.go b/core/genesis_test.go
index b73dd776fe..4312a80b89 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -23,11 +23,11 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
func TestDefaultGenesisBlock(t *testing.T) {
@@ -119,7 +119,7 @@ func TestSetupGenesis(t *testing.T) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
- bc, _ := NewBlockChain(db, oldcustomg.Config, pow.FakePow{}, new(event.TypeMux), vm.Config{})
+ bc, _ := NewBlockChain(db, oldcustomg.Config, ethash.NewFullFaker(), new(event.TypeMux), vm.Config{})
bc.SetValidator(bproc{})
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
bc.CurrentBlock()
diff --git a/core/headerchain.go b/core/headerchain.go
index e7660cc600..e2d0ff5b17 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -18,21 +18,19 @@ package core
import (
crand "crypto/rand"
+ "errors"
"fmt"
"math"
"math/big"
mrand "math/rand"
- "runtime"
- "sync"
- "sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/hashicorp/golang-lru"
)
@@ -62,18 +60,15 @@ type HeaderChain struct {
procInterrupt func() bool
- rand *mrand.Rand
- getValidator getHeaderValidatorFn
+ rand *mrand.Rand
+ engine consensus.Engine
}
-// getHeaderValidatorFn returns a HeaderValidator interface
-type getHeaderValidatorFn func() HeaderValidator
-
// NewHeaderChain creates a new HeaderChain structure.
-// getValidator should return the parent's validator
-// procInterrupt points to the parent's interrupt semaphore
-// wg points to the parent's shutdown wait group
+// engine is the consensus engine used to verify retrieved headers
+// procInterrupt points to the parent's interrupt semaphore
-func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValidator getHeaderValidatorFn, procInterrupt func() bool) (*HeaderChain, error) {
+func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
numberCache, _ := lru.New(numberCacheLimit)
@@ -92,7 +87,7 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, getValid
numberCache: numberCache,
procInterrupt: procInterrupt,
rand: mrand.New(mrand.NewSource(seed.Int64())),
- getValidator: getValidator,
+ engine: engine,
}
hc.genesisHeader = hc.GetHeaderByNumber(0)
@@ -228,78 +223,34 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
}
}
- // Generate the list of headers that should be POW verified
- verify := make([]bool, len(chain))
- for i := 0; i < len(verify)/checkFreq; i++ {
+ // Generate the list of seal verification requests, and start the parallel verifier
+ seals := make([]bool, len(chain))
+ for i := 0; i < len(seals)/checkFreq; i++ {
index := i*checkFreq + hc.rand.Intn(checkFreq)
- if index >= len(verify) {
- index = len(verify) - 1
+ if index >= len(seals) {
+ index = len(seals) - 1
}
- verify[index] = true
- }
- verify[len(verify)-1] = true // Last should always be verified to avoid junk
-
- // Create the header verification task queue and worker functions
- tasks := make(chan int, len(chain))
- for i := 0; i < len(chain); i++ {
- tasks <- i
+ seals[index] = true
}
- close(tasks)
+ seals[len(seals)-1] = true // Last should always be verified to avoid junk
- errs, failed := make([]error, len(tasks)), int32(0)
- process := func(worker int) {
- for index := range tasks {
- header, hash := chain[index], chain[index].Hash()
+ abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
+ defer close(abort)
- // Short circuit insertion if shutting down or processing failed
- if hc.procInterrupt() {
- return
- }
- if atomic.LoadInt32(&failed) > 0 {
- return
- }
- // Short circuit if the header is bad or already known
- if BadHashes[hash] {
- errs[index] = BadHashError(hash)
- atomic.AddInt32(&failed, 1)
- return
- }
- if hc.HasHeader(hash) {
- continue
- }
- // Verify that the header honors the chain parameters
- checkPow := verify[index]
-
- var err error
- if index == 0 {
- err = hc.getValidator().ValidateHeader(header, hc.GetHeader(header.ParentHash, header.Number.Uint64()-1), checkPow)
- } else {
- err = hc.getValidator().ValidateHeader(header, chain[index-1], checkPow)
- }
- if err != nil {
- errs[index] = err
- atomic.AddInt32(&failed, 1)
- return
- }
+ // Iterate over the headers and ensure they all check out
+ for i, header := range chain {
+ // If the chain is terminating, stop processing blocks
+ if hc.procInterrupt() {
+ log.Debug("Premature abort during headers verification")
+ return 0, errors.New("aborted")
}
- }
- // Start as many worker threads as goroutines allowed
- pending := new(sync.WaitGroup)
- for i := 0; i < runtime.GOMAXPROCS(0); i++ {
- pending.Add(1)
- go func(id int) {
- defer pending.Done()
- process(id)
- }(i)
- }
- pending.Wait()
-
- // If anything failed, report
- if failed > 0 {
- for i, err := range errs {
- if err != nil {
- return i, err
- }
+ // If the header is a banned one, straight out abort
+ if BadHashes[header.Hash()] {
+ return i, BadHashError(header.Hash())
+ }
+ // Otherwise wait for headers checks and ensure they pass
+ if err := <-results; err != nil {
+ return i, err
}
}
@@ -313,13 +264,11 @@ func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCa
for i, header := range chain {
// Short circuit insertion if shutting down
if hc.procInterrupt() {
- log.Debug("Premature abort during headers processing")
- break
+ log.Debug("Premature abort during headers import")
+ return i, errors.New("aborted")
}
- hash := header.Hash()
-
// If the header's already known, skip it, otherwise store
- if hc.HasHeader(hash) {
+ if hc.GetHeader(header.Hash(), header.Number.Uint64()) != nil {
stats.ignored++
continue
}
@@ -490,35 +439,11 @@ func (hc *HeaderChain) SetGenesis(head *types.Header) {
hc.genesisHeader = head
}
-// headerValidator is responsible for validating block headers
-//
-// headerValidator implements HeaderValidator.
-type headerValidator struct {
- config *params.ChainConfig
- hc *HeaderChain // Canonical header chain
- Pow pow.PoW // Proof of work used for validating
-}
-
-// NewBlockValidator returns a new block validator which is safe for re-use
-func NewHeaderValidator(config *params.ChainConfig, chain *HeaderChain, pow pow.PoW) HeaderValidator {
- return &headerValidator{
- config: config,
- Pow: pow,
- hc: chain,
- }
-}
+// Config retrieves the header chain's chain configuration.
+func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
-// ValidateHeader validates the given header and, depending on the pow arg,
-// checks the proof of work of the given header. Returns an error if the
-// validation failed.
-func (v *headerValidator) ValidateHeader(header, parent *types.Header, checkPow bool) error {
- // Short circuit if the parent is missing.
- if parent == nil {
- return ParentError(header.ParentHash)
- }
- // Short circuit if the header's already known or its parent missing
- if v.hc.HasHeader(header.Hash()) {
- return nil
- }
- return ValidateHeader(v.config, v.Pow, header, parent, checkPow, false)
+// GetBlock implements consensus.ChainReader, and returns nil for every input as
+// a header chain does not have blocks available for retrieval.
+func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
+ return nil
}
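The rewritten ValidateHeaderChain is the template for consuming the new engine API: VerifyHeaders hands back an abort channel plus a results channel that yields one error per header, in input order. A stripped-down sketch of that contract, assuming only the consensus.Engine interface introduced by this change:

package headerutil // hypothetical package, illustration only

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
)

// verifyAll asks the engine to verify every header (seals included) and drains
// exactly one result per input, closing abort on return to stop stray workers.
func verifyAll(chain consensus.ChainReader, engine consensus.Engine, headers []*types.Header) error {
	seals := make([]bool, len(headers))
	for i := range seals {
		seals[i] = true
	}
	abort, results := engine.VerifyHeaders(chain, headers, seals)
	defer close(abort)

	for range headers {
		if err := <-results; err != nil {
			return err
		}
	}
	return nil
}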
diff --git a/core/state_processor.go b/core/state_processor.go
index 3edc042a36..aca2929eb2 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -19,6 +19,8 @@ package core
import (
"math/big"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -26,25 +28,22 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-var (
- big8 = big.NewInt(8)
- big32 = big.NewInt(32)
-)
-
// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
// StateProcessor implements Processor.
type StateProcessor struct {
- config *params.ChainConfig
- bc *BlockChain
+ config *params.ChainConfig // Chain configuration options
+ bc *BlockChain // Canonical block chain
+ engine consensus.Engine // Consensus engine used for block rewards
}
// NewStateProcessor initialises a new StateProcessor.
-func NewStateProcessor(config *params.ChainConfig, bc *BlockChain) *StateProcessor {
+func NewStateProcessor(config *params.ChainConfig, bc *BlockChain, engine consensus.Engine) *StateProcessor {
return &StateProcessor{
config: config,
bc: bc,
+ engine: engine,
}
}
@@ -59,18 +58,16 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
var (
receipts types.Receipts
totalUsedGas = big.NewInt(0)
- err error
header = block.Header()
allLogs []*types.Log
gp = new(GasPool).AddGas(block.GasLimit())
)
// Mutate the block and state according to any hard-fork specs
if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 {
- ApplyDAOHardFork(statedb)
+ misc.ApplyDAOHardFork(statedb)
}
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
- //fmt.Println("tx:", i)
statedb.StartRecord(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, gp, statedb, header, tx, totalUsedGas, cfg)
if err != nil {
@@ -79,9 +76,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
receipts = append(receipts, receipt)
allLogs = append(allLogs, receipt.Logs...)
}
- AccumulateRewards(statedb, header, block.Uncles())
+ // Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
+ p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), receipts)
- return receipts, allLogs, totalUsedGas, err
+ return receipts, allLogs, totalUsedGas, nil
}
// ApplyTransaction attempts to apply a transaction to the given state database
@@ -122,23 +120,3 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, gp *GasPool, s
return receipt, gas, err
}
-
-// AccumulateRewards credits the coinbase of the given block with the
-// mining reward. The total reward consists of the static block reward
-// and rewards for included uncles. The coinbase of each uncle block is
-// also rewarded.
-func AccumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
- reward := new(big.Int).Set(BlockReward)
- r := new(big.Int)
- for _, uncle := range uncles {
- r.Add(uncle.Number, big8)
- r.Sub(r, header.Number)
- r.Mul(r, BlockReward)
- r.Div(r, big8)
- statedb.AddBalance(uncle.Coinbase, r)
-
- r.Div(BlockReward, big32)
- reward.Add(reward, r)
- }
- statedb.AddBalance(header.Coinbase, reward)
-}
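The deleted AccumulateRewards logic is not gone, it simply moves behind the engine's Finalize step. For reference, an equivalent standalone sketch of the removed arithmetic; the 5-ether static reward is written out explicitly since core no longer exports BlockReward, so treat that constant as an assumption of the pre-Byzantium rules:

package rewards // hypothetical package, illustration only

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// accumulateRewards mirrors the removed core.AccumulateRewards: a static block
// reward, a share for every included uncle, plus a per-uncle bonus for the
// block's own coinbase.
func accumulateRewards(statedb *state.StateDB, header *types.Header, uncles []*types.Header) {
	blockReward := big.NewInt(5e+18) // 5 ether, assumed pre-Byzantium static reward
	big8, big32 := big.NewInt(8), big.NewInt(32)

	reward := new(big.Int).Set(blockReward)
	r := new(big.Int)
	for _, uncle := range uncles {
		r.Add(uncle.Number, big8)
		r.Sub(r, header.Number)
		r.Mul(r, blockReward)
		r.Div(r, big8)
		statedb.AddBalance(uncle.Coinbase, r)

		r.Div(blockReward, big32)
		reward.Add(reward, r)
	}
	statedb.AddBalance(header.Coinbase, reward)
}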
diff --git a/core/types.go b/core/types.go
index 7fd658979c..1cfbbab29b 100644
--- a/core/types.go
+++ b/core/types.go
@@ -24,31 +24,17 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
)
-// Validator is an interface which defines the standard for block validation.
+// Validator is an interface which defines the standard for block validation. It
+// is only responsible for validating block contents, as the header validation is
+// done by the specific consensus engines.
//
-// The validator is responsible for validating incoming block or, if desired,
-// validates headers for fast validation.
-//
-// ValidateBlock validates the given block and should return an error if it
-// failed to do so and should be used for "full" validation.
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-//
-// ValidateState validates the given statedb and optionally the receipts and
-// gas used. The implementer should decide what to do with the given input.
type Validator interface {
- HeaderValidator
- ValidateBlock(block *types.Block) error
- ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
-}
+ // ValidateBody validates the given block's content.
+ ValidateBody(block *types.Block) error
-// HeaderValidator is an interface for validating headers only
-//
-// ValidateHeader validates the given header and parent and returns an error
-// if it failed to do so.
-type HeaderValidator interface {
- ValidateHeader(header, parent *types.Header, checkPow bool) error
+ // ValidateState validates the given statedb and optionally the receipts and
+ // gas used.
+ ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error
}
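With header checks delegated to the consensus engines, Validator shrinks to body and state validation only. A no-op implementation satisfying the slimmed interface, useful as a test stand-in (hypothetical type; assumes the package's existing imports):

// nopValidator accepts every block body and post-state unconditionally.
type nopValidator struct{}

func (nopValidator) ValidateBody(*types.Block) error { return nil }

func (nopValidator) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas *big.Int) error {
	return nil
}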
// Processor is an interface for processing blocks using a given initial state.
diff --git a/core/types/block.go b/core/types/block.go
index b699ba6862..278594d506 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -348,12 +348,11 @@ func CalcUncleHash(uncles []*Header) common.Hash {
return rlpHash(uncles)
}
-// WithMiningResult returns a new block with the data from b
-// where nonce and mix digest are set to the provided values.
-func (b *Block) WithMiningResult(nonce BlockNonce, mixDigest common.Hash) *Block {
- cpy := *b.header
- cpy.Nonce = nonce
- cpy.MixDigest = mixDigest
+// WithSeal returns a new block with the data from b but the header replaced with
+// the sealed one.
+func (b *Block) WithSeal(header *Header) *Block {
+ cpy := *header
+
return &Block{
header: &cpy,
transactions: b.transactions,
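WithSeal generalises the removed helper: callers now hand back a complete sealed header rather than patching nonce and mix digest in place. The old behaviour is recoverable in a few lines, since Header() already returns a copy (hypothetical function, written from outside the types package):

// withMiningResult reproduces the removed helper on top of WithSeal.
func withMiningResult(b *types.Block, nonce types.BlockNonce, mixDigest common.Hash) *types.Block {
	header := b.Header() // Header() returns a copy, so b itself stays untouched
	header.Nonce = nonce
	header.MixDigest = mixDigest
	return b.WithSeal(header)
}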
diff --git a/eth/api.go b/eth/api.go
index b64153fd72..58ae27df23 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -26,7 +26,6 @@ import (
"io/ioutil"
"math/big"
"os"
- "runtime"
"strings"
"time"
@@ -37,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -56,18 +56,18 @@ func NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {
}
// Etherbase is the address that mining rewards will be sent to
-func (s *PublicEthereumAPI) Etherbase() (common.Address, error) {
- return s.e.Etherbase()
+func (api *PublicEthereumAPI) Etherbase() (common.Address, error) {
+ return api.e.Etherbase()
}
// Coinbase is the address that mining rewards will be sent to (alias for Etherbase)
-func (s *PublicEthereumAPI) Coinbase() (common.Address, error) {
- return s.Etherbase()
+func (api *PublicEthereumAPI) Coinbase() (common.Address, error) {
+ return api.Etherbase()
}
// Hashrate returns the POW hashrate
-func (s *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
- return hexutil.Uint64(s.e.Miner().HashRate())
+func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 {
+ return hexutil.Uint64(api.e.Miner().HashRate())
}
// PublicMinerAPI provides an API to control the miner.
@@ -79,34 +79,34 @@ type PublicMinerAPI struct {
// NewPublicMinerAPI creates a new PublicMinerAPI instance.
func NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {
- agent := miner.NewRemoteAgent(e.Pow())
+ agent := miner.NewRemoteAgent(e.BlockChain(), e.Engine())
e.Miner().Register(agent)
return &PublicMinerAPI{e, agent}
}
// Mining returns an indication if this node is currently mining.
-func (s *PublicMinerAPI) Mining() bool {
- return s.e.IsMining()
+func (api *PublicMinerAPI) Mining() bool {
+ return api.e.IsMining()
}
// SubmitWork can be used by external miners to submit their POW solution. It returns an indication if the work was
// accepted. Note, this is not an indication if the provided work was valid!
-func (s *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
- return s.agent.SubmitWork(nonce, digest, solution)
+func (api *PublicMinerAPI) SubmitWork(nonce types.BlockNonce, solution, digest common.Hash) bool {
+ return api.agent.SubmitWork(nonce, digest, solution)
}
// GetWork returns a work package for an external miner. The work package consists of 3 strings
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
-func (s *PublicMinerAPI) GetWork() ([3]string, error) {
- if !s.e.IsMining() {
- if err := s.e.StartMining(0); err != nil {
+func (api *PublicMinerAPI) GetWork() ([3]string, error) {
+ if !api.e.IsMining() {
+ if err := api.e.StartMining(); err != nil {
return [3]string{}, err
}
}
- work, err := s.agent.GetWork()
+ work, err := api.agent.GetWork()
if err != nil {
return work, fmt.Errorf("mining not ready: %v", err)
}
@@ -116,8 +116,8 @@ func (s *PublicMinerAPI) GetWork() ([3]string, error) {
// SubmitHashrate can be used by remote miners to submit their hash rate. This enables the node to report the combined
// hash rate of all miners which submit work through this node. It accepts the miner hash rate and an identifier which
// must be unique between nodes.
-func (s *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
- s.agent.SubmitHashrate(id, uint64(hashrate))
+func (api *PublicMinerAPI) SubmitHashrate(hashrate hexutil.Uint64, id common.Hash) bool {
+ api.agent.SubmitHashrate(id, uint64(hashrate))
return true
}
@@ -132,47 +132,59 @@ func NewPrivateMinerAPI(e *Ethereum) *PrivateMinerAPI {
return &PrivateMinerAPI{e: e}
}
-// Start the miner with the given number of threads. If threads is nil the number of
-// workers started is equal to the number of logical CPU's that are usable by this process.
-func (s *PrivateMinerAPI) Start(threads *int) (bool, error) {
- var err error
- if threads == nil {
- err = s.e.StartMining(runtime.NumCPU())
- } else {
- err = s.e.StartMining(*threads)
+// Start the miner with the given number of threads. If threads is nil the number
+// of workers started is equal to the number of logical CPUs that are usable by
+// this process. If mining is already running, this method adjusts the number of
+// threads the miner is allowed to use.
+func (api *PrivateMinerAPI) Start(threads *int) error {
+ // Set the number of threads if the seal engine supports it
+ if threads != nil {
+ type threaded interface {
+ SetThreads(threads int)
+ }
+ if th, ok := api.e.engine.(threaded); ok {
+ log.Info("Updated mining threads", "threads", *threads)
+ th.SetThreads(*threads)
+ } else {
+ log.Warn("Current seal engine isn't threaded")
+ }
}
- return err == nil, err
+ // Start the miner and return
+ if !api.e.IsMining() {
+ return api.e.StartMining()
+ }
+ return nil
}
// Stop the miner
-func (s *PrivateMinerAPI) Stop() bool {
- s.e.StopMining()
+func (api *PrivateMinerAPI) Stop() bool {
+ api.e.StopMining()
return true
}
// SetExtra sets the extra data string that is included when this miner mines a block.
-func (s *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
- if err := s.e.Miner().SetExtra([]byte(extra)); err != nil {
+func (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
+ if err := api.e.Miner().SetExtra([]byte(extra)); err != nil {
return false, err
}
return true, nil
}
// SetGasPrice sets the minimum accepted gas price for the miner.
-func (s *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
- s.e.Miner().SetGasPrice((*big.Int)(&gasPrice))
+func (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
+ api.e.Miner().SetGasPrice((*big.Int)(&gasPrice))
return true
}
// SetEtherbase sets the etherbase of the miner
-func (s *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
- s.e.SetEtherbase(etherbase)
+func (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {
+ api.e.SetEtherbase(etherbase)
return true
}
// GetHashrate returns the current hashrate of the miner.
-func (s *PrivateMinerAPI) GetHashrate() uint64 {
- return uint64(s.e.miner.HashRate())
+func (api *PrivateMinerAPI) GetHashrate() uint64 {
+ return uint64(api.e.miner.HashRate())
}
// PrivateAdminAPI is the collection of Ethereum full node-related APIs
@@ -321,7 +333,7 @@ type TraceArgs struct {
Timeout *string
}
-// TraceBlock processes the given block's RLP but does not import the block in to
+// TraceBlock processes the given block's RLP but does not import the block into
// the chain.
func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) BlockTraceResult {
var block types.Block
@@ -338,7 +350,7 @@ func (api *PrivateDebugAPI) TraceBlock(blockRlp []byte, config *vm.LogConfig) Bl
}
}
-// TraceBlockFromFile loads the block's RLP from the given file name and attempts to
+// TraceBlockFromFile loads the block's RLP from the given file name and attempts to
// process it but does not import the block in to the chain.
func (api *PrivateDebugAPI) TraceBlockFromFile(file string, config *vm.LogConfig) BlockTraceResult {
blockRlp, err := ioutil.ReadFile(file)
@@ -395,8 +407,7 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
Debug: true,
Tracer: structLogger,
}
-
- if err := core.ValidateHeader(api.config, blockchain.AuxValidator(), block.Header(), blockchain.GetHeader(block.ParentHash(), block.NumberU64()-1), true, false); err != nil {
+ if err := api.eth.engine.VerifyHeader(blockchain, block.Header(), true); err != nil {
return false, structLogger.StructLogs(), err
}
statedb, err := blockchain.StateAt(blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1).Root())
diff --git a/eth/backend.go b/eth/backend.go
index b8df98976d..af1d46a2cc 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -22,10 +22,11 @@ import (
"math/big"
"regexp"
"sync"
- "time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -40,18 +41,9 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rpc"
)
-const (
- epochLength = 30000
- ethashRevision = 23
-
- autoDAGcheckInterval = 10 * time.Hour
- autoDAGepochHeight = epochLength / 2
-)
-
var (
datadirInUseErrnos = map[uint]bool{11: true, 32: true, 35: true}
portInUseErrRE = regexp.MustCompile("address already in use")
@@ -124,7 +116,7 @@ type Ethereum struct {
chainDb ethdb.Database // Block chain database
eventMux *event.TypeMux
- pow pow.PoW
+ engine consensus.Engine
accountManager *accounts.Manager
ApiBackend *EthApiBackend
@@ -163,7 +155,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
- pow: CreatePoW(ctx, config),
+ engine: CreateConsensusEngine(ctx, config, chainConfig, chainDb),
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
netVersionId: config.NetworkId,
@@ -186,7 +178,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
vmConfig := vm.Config{EnablePreimageRecording: config.EnablePreimageRecording}
- eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.pow, eth.eventMux, vmConfig)
+ eth.blockchain, err = core.NewBlockChain(chainDb, eth.chainConfig, eth.engine, eth.eventMux, vmConfig)
if err != nil {
return nil, err
}
@@ -211,10 +203,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
}
- if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.pow, eth.blockchain, chainDb); err != nil {
+ if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.FastSync, config.NetworkId, maxPeers, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb); err != nil {
return nil, err
}
- eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.pow)
+
+ eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
eth.miner.SetGasPrice(config.GasPrice)
eth.miner.SetExtra(config.ExtraData)
@@ -241,20 +234,20 @@ func CreateDB(ctx *node.ServiceContext, config *Config, name string) (ethdb.Data
return db, err
}
-// CreatePoW creates the required type of PoW instance for an Ethereum service
-func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
+// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service
+func CreateConsensusEngine(ctx *node.ServiceContext, config *Config, chainConfig *params.ChainConfig, db ethdb.Database) consensus.Engine {
switch {
case config.PowFake:
log.Warn("Ethash used in fake mode")
- return pow.FakePow{}
+ return ethash.NewFaker()
case config.PowTest:
log.Warn("Ethash used in test mode")
- return pow.NewTestEthash()
+ return ethash.NewTester()
case config.PowShared:
log.Warn("Ethash used in shared mode")
- return pow.NewSharedEthash()
+ return ethash.NewShared()
default:
- return pow.NewFullEthash(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
+ return ethash.New(ctx.ResolvePath(config.EthashCacheDir), config.EthashCachesInMem, config.EthashCachesOnDisk,
config.EthashDatasetDir, config.EthashDatasetsInMem, config.EthashDatasetsOnDisk)
}
}
@@ -262,7 +255,13 @@ func CreatePoW(ctx *node.ServiceContext, config *Config) pow.PoW {
// APIs returns the collection of RPC services the ethereum package offers.
// NOTE, some of these services probably need to be moved to somewhere else.
func (s *Ethereum) APIs() []rpc.API {
- return append(ethapi.GetAPIs(s.ApiBackend, s.solcPath), []rpc.API{
+ apis := ethapi.GetAPIs(s.ApiBackend, s.solcPath)
+
+ // Append any APIs exposed explicitly by the consensus engine
+ apis = append(apis, s.engine.APIs(s.BlockChain())...)
+
+ // Append all the local APIs and return
+ return append(apis, []rpc.API{
{
Namespace: "eth",
Version: "1.0",
@@ -332,13 +331,13 @@ func (self *Ethereum) SetEtherbase(etherbase common.Address) {
self.miner.SetEtherbase(etherbase)
}
-func (s *Ethereum) StartMining(threads int) error {
+func (s *Ethereum) StartMining() error {
eb, err := s.Etherbase()
if err != nil {
log.Error("Cannot start mining without etherbase", "err", err)
return fmt.Errorf("etherbase missing: %v", err)
}
- go s.miner.Start(eb, threads)
+ go s.miner.Start(eb)
return nil
}
@@ -350,7 +349,7 @@ func (s *Ethereum) AccountManager() *accounts.Manager { return s.accountManager
func (s *Ethereum) BlockChain() *core.BlockChain { return s.blockchain }
func (s *Ethereum) TxPool() *core.TxPool { return s.txPool }
func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux }
-func (s *Ethereum) Pow() pow.PoW { return s.pow }
+func (s *Ethereum) Engine() consensus.Engine { return s.engine }
func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
func (s *Ethereum) IsListening() bool { return true } // Always listening
func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
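CreateConsensusEngine maps the old PoW modes one-to-one onto the ethash constructors. As a hedged summary: NewFaker stubs out seal verification but keeps the remaining header rules, NewFullFaker skips header verification entirely, NewTester runs real PoW over tiny datasets, NewShared reuses one full engine per process, and New is the production engine wired to the cache/dataset directories. A test helper might choose between the fakes like this (illustrative only):

package enginetest // hypothetical package, illustration only

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/consensus/ethash"
)

// pickTestEngine chooses between the two fake engines used throughout the
// rewritten tests (behavioural notes are hedged summaries, not API docs).
func pickTestEngine(skipAllHeaderChecks bool) consensus.Engine {
	if skipAllHeaderChecks {
		return ethash.NewFullFaker() // accepts every header outright
	}
	return ethash.NewFaker() // fakes the seal only; other header rules still apply
}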
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index d82f4f3e6c..64a9910696 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -52,8 +52,8 @@ type headerRequesterFn func(common.Hash) error
// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error
-// blockValidatorFn is a callback type to verify a block's header for fast propagation.
-type blockValidatorFn func(block *types.Block, parent *types.Block) error
+// headerVerifierFn is a callback type to verify a block's header for fast propagation.
+type headerVerifierFn func(header *types.Header) error
// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)
@@ -129,7 +129,7 @@ type Fetcher struct {
// Callbacks
getBlock blockRetrievalFn // Retrieves a block from the local chain
- validateBlock blockValidatorFn // Checks if a block's headers have a valid proof of work
+ verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work
broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
chainHeight chainHeightFn // Retrieves the current chain's height
insertChain chainInsertFn // Injects a batch of blocks into the chain
@@ -144,7 +144,7 @@ type Fetcher struct {
}
// New creates a block fetcher to retrieve blocks based on hash announcements.
-func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
+func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
return &Fetcher{
notify: make(chan *announce),
inject: make(chan *inject),
@@ -162,7 +162,7 @@ func New(getBlock blockRetrievalFn, validateBlock blockValidatorFn, broadcastBlo
queues: make(map[string]int),
queued: make(map[common.Hash]*inject),
getBlock: getBlock,
- validateBlock: validateBlock,
+ verifyHeader: verifyHeader,
broadcastBlock: broadcastBlock,
chainHeight: chainHeight,
insertChain: insertChain,
@@ -648,7 +648,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
return
}
// Quickly validate the header and propagate the block if it passes
- switch err := f.validateBlock(block, parent); err {
+ switch err := f.verifyHeader(block.Header()); err {
case nil:
// All ok, quickly propagate to our peers
propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/fetcher_test.go
index 7a94241c66..85d2f8645e 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/fetcher_test.go
@@ -91,7 +91,7 @@ func newTester() *fetcherTester {
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
- tester.fetcher = New(tester.getBlock, tester.verifyBlock, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+ tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
return tester
@@ -105,8 +105,8 @@ func (f *fetcherTester) getBlock(hash common.Hash) *types.Block {
return f.blocks[hash]
}
-// verifyBlock is a nop placeholder for the block header verification.
-func (f *fetcherTester) verifyBlock(block *types.Block, parent *types.Block) error {
+// verifyHeader is a nop placeholder for the block header verification.
+func (f *fetcherTester) verifyHeader(header *types.Header) error {
return nil
}
diff --git a/eth/handler.go b/eth/handler.go
index ade8f77192..4452720603 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -27,6 +27,8 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/downloader"
@@ -37,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -97,7 +98,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of communicating with the ethereum network.
-func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, pow pow.PoW, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
+func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int, maxPeers int, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
networkId: networkId,
@@ -165,8 +166,8 @@ func NewProtocolManager(config *params.ChainConfig, fastSync bool, networkId int
blockchain.GetTdByHash, blockchain.InsertHeaderChain, manager.insertChain, blockchain.InsertReceiptChain, blockchain.Rollback,
manager.removePeer)
- validator := func(block *types.Block, parent *types.Block) error {
- return core.ValidateHeader(config, pow, block.Header(), parent.Header(), true, false)
+ validator := func(header *types.Header) error {
+ return engine.VerifyHeader(blockchain, header, true)
}
heighter := func() uint64 {
return blockchain.CurrentBlock().NumberU64()
@@ -448,7 +449,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.forkDrop = nil
// Validate the header and either drop the peer or continue
- if err := core.ValidateDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
+ if err := misc.VerifyDAOHeaderExtraData(pm.chainconfig, headers[0]); err != nil {
p.Log().Debug("Verified to be on the other side of the DAO fork, dropping")
return err
}
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 31435b331d..f85d730b67 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
var bigTxGas = new(big.Int).SetUint64(params.TxGas)
@@ -469,7 +469,7 @@ func testDAOChallenge(t *testing.T, localForked, remoteForked bool, timeout bool
// Create a DAO aware protocol manager
var (
evmux = new(event.TypeMux)
- pow = new(pow.FakePow)
+ pow = ethash.NewFaker()
db, _ = ethdb.NewMemDatabase()
config = &params.ChainConfig{DAOForkBlock: big.NewInt(1), DAOForkSupport: localForked}
gspec = &core.Genesis{Config: config}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index e67b496915..a8c538e6ce 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -28,6 +28,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -37,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
var (
@@ -50,22 +50,22 @@ var (
// channels for different events.
func newTestProtocolManager(fastSync bool, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, error) {
var (
- evmux = new(event.TypeMux)
- pow = new(pow.FakePow)
- db, _ = ethdb.NewMemDatabase()
- gspec = &core.Genesis{
+ evmux = new(event.TypeMux)
+ engine = ethash.NewFaker()
+ db, _ = ethdb.NewMemDatabase()
+ gspec = &core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}},
}
genesis = gspec.MustCommit(db)
- blockchain, _ = core.NewBlockChain(db, gspec.Config, pow, evmux, vm.Config{})
+ blockchain, _ = core.NewBlockChain(db, gspec.Config, engine, evmux, vm.Config{})
)
chain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
if _, err := blockchain.InsertChain(chain); err != nil {
panic(err)
}
- pm, err := NewProtocolManager(gspec.Config, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, pow, blockchain, db)
+ pm, err := NewProtocolManager(gspec.Config, fastSync, NetworkId, 1000, evmux, &testTxPool{added: newtx}, engine, blockchain, db)
if err != nil {
return nil, err
}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index ccb7ec80b1..987e144198 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
"github.com/syndtr/goleveldb/leveldb"
@@ -1378,7 +1378,7 @@ func (api *PublicDebugAPI) SeedHash(ctx context.Context, number uint64) (string,
if block == nil {
return "", fmt.Errorf("block #%d not found", number)
}
- return fmt.Sprintf("0x%x", pow.EthashSeedHash(number)), nil
+ return fmt.Sprintf("0x%x", ethash.SeedHash(number)), nil
}
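ethash.SeedHash, now called directly by the debug API, changes only once per 30000-block epoch (the epochLength constant dropped from eth/backend.go earlier in this diff). A quick illustration, assuming only the SeedHash function shown above:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/ethash"
)

func main() {
	// Blocks 0..29999 share one seed hash; block 30000 starts the next epoch.
	fmt.Printf("epoch 0: %x\n", ethash.SeedHash(1))
	fmt.Printf("epoch 1: %x\n", ethash.SeedHash(30000))
}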
// PrivateDebugAPI is the collection of Ethereum APIs exposed over the private
diff --git a/les/backend.go b/les/backend.go
index d656cf41f5..bb08efd915 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/compiler"
"github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
rpc "github.com/ethereum/go-ethereum/rpc"
)
@@ -59,7 +59,7 @@ type LightEthereum struct {
ApiBackend *LesApiBackend
eventMux *event.TypeMux
- pow pow.PoW
+ engine consensus.Engine
accountManager *accounts.Manager
solcPath string
solc *compiler.Solidity
@@ -88,14 +88,12 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
chainConfig: chainConfig,
eventMux: ctx.EventMux,
accountManager: ctx.AccountManager,
- pow: eth.CreatePoW(ctx, config),
+ engine: eth.CreateConsensusEngine(ctx, config, chainConfig, chainDb),
shutdownChan: make(chan bool),
netVersionId: config.NetworkId,
solcPath: config.SolcPath,
}
-
- eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.pow, eth.eventMux)
- if err != nil {
+ if eth.blockchain, err = light.NewLightChain(odr, eth.chainConfig, eth.engine, eth.eventMux); err != nil {
return nil, err
}
// Rewind the chain in case of an incompatible config upgrade.
@@ -106,7 +104,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
}
eth.txPool = light.NewTxPool(eth.chainConfig, eth.eventMux, eth.blockchain, eth.relay)
- if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.pow, eth.blockchain, nil, chainDb, odr, relay); err != nil {
+ if eth.protocolManager, err = NewProtocolManager(eth.chainConfig, config.LightMode, config.NetworkId, eth.eventMux, eth.engine, eth.blockchain, nil, chainDb, odr, relay); err != nil {
return nil, err
}
relay.ps = eth.protocolManager.peers
diff --git a/les/handler.go b/les/handler.go
index ece2060ee8..fbb9e99062 100644
--- a/les/handler.go
+++ b/les/handler.go
@@ -27,6 +27,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
@@ -128,7 +128,7 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// of communicating with the ethereum network.
-func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, pow pow.PoW, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) {
+func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, networkId int, mux *event.TypeMux, engine consensus.Engine, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
lightSync: lightSync,
diff --git a/les/helper_test.go b/les/helper_test.go
index f37bec80e1..7e442c131b 100644
--- a/les/helper_test.go
+++ b/les/helper_test.go
@@ -28,6 +28,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@@ -39,7 +40,6 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
var (
@@ -134,10 +134,10 @@ func testRCL() RequestCostList {
// channels for different events.
func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *core.BlockGen)) (*ProtocolManager, ethdb.Database, *LesOdr, error) {
var (
- evmux = new(event.TypeMux)
- pow = new(pow.FakePow)
- db, _ = ethdb.NewMemDatabase()
- gspec = core.Genesis{
+ evmux = new(event.TypeMux)
+ engine = ethash.NewFaker()
+ db, _ = ethdb.NewMemDatabase()
+ gspec = core.Genesis{
Config: params.TestChainConfig,
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
}
@@ -148,9 +148,9 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
if lightSync {
odr = NewLesOdr(db)
- chain, _ = light.NewLightChain(odr, gspec.Config, pow, evmux)
+ chain, _ = light.NewLightChain(odr, gspec.Config, engine, evmux)
} else {
- blockchain, _ := core.NewBlockChain(db, gspec.Config, pow, evmux, vm.Config{})
+ blockchain, _ := core.NewBlockChain(db, gspec.Config, engine, evmux, vm.Config{})
gchain, _ := core.GenerateChain(gspec.Config, genesis, db, blocks, generator)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
@@ -158,7 +158,7 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor
chain = blockchain
}
- pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, pow, chain, nil, db, odr, nil)
+ pm, err := NewProtocolManager(gspec.Config, lightSync, NetworkId, evmux, engine, chain, nil, db, odr, nil)
if err != nil {
return nil, nil, nil, err
}
diff --git a/les/server.go b/les/server.go
index 5957add367..22fe59b7ac 100644
--- a/les/server.go
+++ b/les/server.go
@@ -45,7 +45,7 @@ type LesServer struct {
}
func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
- pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Pow(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil)
+ pm, err := NewProtocolManager(eth.BlockChain().Config(), false, config.NetworkId, eth.EventMux(), eth.Engine(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil)
if err != nil {
return nil, err
}
diff --git a/light/lightchain.go b/light/lightchain.go
index 98fb024f0b..4073e39e59 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -24,13 +24,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/hashicorp/golang-lru"
)
@@ -64,14 +64,13 @@ type LightChain struct {
procInterrupt int32 // interrupt signaler for block processing
wg sync.WaitGroup
- pow pow.PoW
- validator core.HeaderValidator
+ engine consensus.Engine
}
// NewLightChain returns a fully initialised light chain using information
// available in the database. It initialises the default Ethereum header
// validator.
-func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux *event.TypeMux) (*LightChain, error) {
+func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.Engine, mux *event.TypeMux) (*LightChain, error) {
bodyCache, _ := lru.New(bodyCacheLimit)
bodyRLPCache, _ := lru.New(bodyCacheLimit)
blockCache, _ := lru.New(blockCacheLimit)
@@ -84,21 +83,17 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux
bodyCache: bodyCache,
bodyRLPCache: bodyRLPCache,
blockCache: blockCache,
- pow: pow,
+ engine: engine,
}
-
var err error
- bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.Validator, bc.getProcInterrupt)
- bc.SetValidator(core.NewHeaderValidator(config, bc.hc, pow))
+ bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.engine, bc.getProcInterrupt)
if err != nil {
return nil, err
}
-
bc.genesisBlock, _ = bc.GetBlockByNumber(NoOdr, 0)
if bc.genesisBlock == nil {
return nil, core.ErrNoGenesis
}
-
if bc.genesisBlock.Hash() == params.MainNetGenesisHash {
// add trusted CHT
WriteTrustedCht(bc.chainDb, TrustedCht{Number: 805, Root: common.HexToHash("85e4286fe0a730390245c49de8476977afdae0eb5530b277f62a52b12313d50f")})
@@ -145,9 +140,6 @@ func (self *LightChain) loadLastState() error {
headerTd := self.GetTd(header.Hash(), header.Number.Uint64())
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd)
- // Try to be smart and issue a pow verification for the head to pre-generate caches
- go self.pow.Verify(types.NewBlockWithHeader(header))
-
return nil
}
@@ -188,20 +180,6 @@ func (self *LightChain) Status() (td *big.Int, currentBlock common.Hash, genesis
return self.GetTd(hash, header.Number.Uint64()), hash, self.genesisBlock.Hash()
}
-// SetValidator sets the validator which is used to validate incoming headers.
-func (self *LightChain) SetValidator(validator core.HeaderValidator) {
- self.procmu.Lock()
- defer self.procmu.Unlock()
- self.validator = validator
-}
-
-// Validator returns the current header validator.
-func (self *LightChain) Validator() core.HeaderValidator {
- self.procmu.RLock()
- defer self.procmu.RUnlock()
- return self.validator
-}
-
// State returns a new mutable state based on the current HEAD block.
func (self *LightChain) State() *LightState {
return NewLightState(StateTrieID(self.hc.CurrentHeader()), self.odr)
diff --git a/light/lightchain_test.go b/light/lightchain_test.go
index e9236f1fd2..41010cf577 100644
--- a/light/lightchain_test.go
+++ b/light/lightchain_test.go
@@ -23,12 +23,12 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// So we can deterministically seed different blockchains
@@ -49,18 +49,15 @@ func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) [
return headers
}
-func testChainConfig() *params.ChainConfig {
- return &params.ChainConfig{HomesteadBlock: big.NewInt(0)}
-}
-
// newCanonical creates a chain database, and injects a deterministic canonical
// chain. Depending on the full flag, it creates either a full block chain or a
// header only chain.
func newCanonical(n int) (ethdb.Database, *LightChain, error) {
db, _ := ethdb.NewMemDatabase()
- gspec := core.Genesis{Config: testChainConfig()}
+ gspec := core.Genesis{Config: params.TestChainConfig}
genesis := gspec.MustCommit(db)
- blockchain, _ := NewLightChain(&dummyOdr{db: db}, gspec.Config, pow.FakePow{}, new(event.TypeMux))
+ blockchain, _ := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFaker(), new(event.TypeMux))
+
// Create and inject the requested chain
if n == 0 {
return db, blockchain, nil
@@ -76,14 +73,13 @@ func newTestLightChain() *LightChain {
db, _ := ethdb.NewMemDatabase()
gspec := &core.Genesis{
Difficulty: big.NewInt(1),
- Config: testChainConfig(),
+ Config: params.TestChainConfig,
}
gspec.MustCommit(db)
- lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, pow.NewTestEthash(), new(event.TypeMux))
+ lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker(), new(event.TypeMux))
if err != nil {
panic(err)
}
- lc.SetValidator(bproc{})
return lc
}
@@ -130,17 +126,17 @@ func printChain(bc *LightChain) {
// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
-func testHeaderChainImport(chain []*types.Header, LightChain *LightChain) error {
+func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error {
for _, header := range chain {
// Try and validate the header
- if err := LightChain.Validator().ValidateHeader(header, LightChain.GetHeaderByHash(header.ParentHash), false); err != nil {
+ if err := lightchain.engine.VerifyHeader(lightchain.hc, header, true); err != nil {
return err
}
// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
- LightChain.mu.Lock()
- core.WriteTd(LightChain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, LightChain.GetTdByHash(header.ParentHash)))
- core.WriteHeader(LightChain.chainDb, header)
- LightChain.mu.Unlock()
+ lightchain.mu.Lock()
+ core.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, lightchain.GetTdByHash(header.ParentHash)))
+ core.WriteHeader(lightchain.chainDb, header)
+ lightchain.mu.Unlock()
}
return nil
}
@@ -257,10 +253,6 @@ func TestBrokenHeaderChain(t *testing.T) {
}
}
-type bproc struct{}
-
-func (bproc) ValidateHeader(*types.Header, *types.Header, bool) error { return nil }
-
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
var chain []*types.Header
for i, difficulty := range d {
@@ -359,7 +351,7 @@ func TestReorgBadHeaderHashes(t *testing.T) {
defer func() { delete(core.BadHashes, headers[3].Hash()) }()
// Create a new LightChain and check that it rolled back the state.
- ncm, err := NewLightChain(&dummyOdr{db: bc.chainDb}, testChainConfig(), pow.FakePow{}, new(event.TypeMux))
+ ncm, err := NewLightChain(&dummyOdr{db: bc.chainDb}, params.TestChainConfig, ethash.NewFaker(), new(event.TypeMux))
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
diff --git a/light/odr_test.go b/light/odr_test.go
index 37d9999941..ca33db2463 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +35,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
@@ -248,7 +248,6 @@ func testChainGen(i int, block *core.BlockGen) {
func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
var (
evmux = new(event.TypeMux)
- pow = new(pow.FakePow)
sdb, _ = ethdb.NewMemDatabase()
ldb, _ = ethdb.NewMemDatabase()
gspec = core.Genesis{Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}}
@@ -256,16 +255,14 @@ func testChainOdr(t *testing.T, protocol int, expFail uint64, fn odrTestFn) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux, vm.Config{})
- chainConfig := &params.ChainConfig{HomesteadBlock: new(big.Int)}
- gchain, _ := core.GenerateChain(chainConfig, genesis, sdb, 4, testChainGen)
+ blockchain, _ := core.NewBlockChain(sdb, params.TestChainConfig, ethash.NewFullFaker(), evmux, vm.Config{})
+ gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, sdb, 4, testChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
}
odr := &testOdr{sdb: sdb, ldb: ldb}
- lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
- lightchain.SetValidator(bproc{})
+ lightchain, _ := NewLightChain(odr, params.TestChainConfig, ethash.NewFullFaker(), evmux)
headers := make([]*types.Header, len(gchain))
for i, block := range gchain {
headers[i] = block.Header()
diff --git a/light/txpool_test.go b/light/txpool_test.go
index 2b5fa7116d..f23832a417 100644
--- a/light/txpool_test.go
+++ b/light/txpool_test.go
@@ -24,13 +24,13 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
type testTxRelay struct {
@@ -83,7 +83,6 @@ func TestTxPool(t *testing.T) {
var (
evmux = new(event.TypeMux)
- pow = new(pow.FakePow)
sdb, _ = ethdb.NewMemDatabase()
ldb, _ = ethdb.NewMemDatabase()
gspec = core.Genesis{Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}}
@@ -91,9 +90,8 @@ func TestTxPool(t *testing.T) {
)
gspec.MustCommit(ldb)
// Assemble the test environment
- blockchain, _ := core.NewBlockChain(sdb, testChainConfig(), pow, evmux, vm.Config{})
- chainConfig := &params.ChainConfig{HomesteadBlock: new(big.Int)}
- gchain, _ := core.GenerateChain(chainConfig, genesis, sdb, poolTestBlocks, txPoolTestChainGen)
+ blockchain, _ := core.NewBlockChain(sdb, params.TestChainConfig, ethash.NewFullFaker(), evmux, vm.Config{})
+ gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, sdb, poolTestBlocks, txPoolTestChainGen)
if _, err := blockchain.InsertChain(gchain); err != nil {
panic(err)
}
@@ -104,10 +102,9 @@ func TestTxPool(t *testing.T) {
discard: make(chan int, 1),
mined: make(chan int, 1),
}
- lightchain, _ := NewLightChain(odr, testChainConfig(), pow, evmux)
- lightchain.SetValidator(bproc{})
+ lightchain, _ := NewLightChain(odr, params.TestChainConfig, ethash.NewFullFaker(), evmux)
txPermanent = 50
- pool := NewTxPool(testChainConfig(), evmux, lightchain, relay)
+ pool := NewTxPool(params.TestChainConfig, evmux, lightchain, relay)
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
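Both of the light package tests above now assemble their backing chain with the ethash test engines and the canned params.TestChainConfig instead of pow.FakePow and a hand-rolled config; NewFullFaker() skips header and seal verification entirely, which is also why the bproc stub validator above could be dropped. A minimal, self-contained sketch of that scaffolding (mirroring the diff; the block generator callback is left empty here):

package main

import (
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	gspec := core.Genesis{Config: params.TestChainConfig}
	genesis := gspec.MustCommit(db)

	// NewFullFaker accepts every header and seal, so the tests need no stub validator.
	evmux := new(event.TypeMux)
	blockchain, _ := core.NewBlockChain(db, params.TestChainConfig, ethash.NewFullFaker(), evmux, vm.Config{})

	// Generate a short chain on top of the genesis and feed it in.
	gchain, _ := core.GenerateChain(params.TestChainConfig, genesis, db, 4, func(i int, gen *core.BlockGen) {})
	if _, err := blockchain.InsertChain(gchain); err != nil {
		panic(err)
	}
}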
diff --git a/log/format.go b/log/format.go
index e315b5237d..6c19c7a552 100644
--- a/log/format.go
+++ b/log/format.go
@@ -133,10 +133,10 @@ func TerminalFormat(usecolor bool) Format {
}
}
// try to justify the log output for short messages
- if len(r.Ctx) > 0 && len(r.Msg) < termMsgJust {
- b.Write(bytes.Repeat([]byte{' '}, termMsgJust-len(r.Msg)))
+ length := utf8.RuneCountInString(r.Msg)
+ if len(r.Ctx) > 0 && length < termMsgJust {
+ b.Write(bytes.Repeat([]byte{' '}, termMsgJust-length))
}
-
// print the keys logfmt style
logfmt(b, r.Ctx, color, true)
return b.Bytes()
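The justification tweak matters because termMsgJust is a column measured in characters: padding derived from len(r.Msg) counts bytes, so messages containing multi-byte UTF-8 (the miner's 🔨/🔗/⑂ prefixes below, for instance) were under-padded and their key/value context drifted out of alignment. A small standalone illustration of the difference:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	msg := "🔨 mined potential block"

	// len counts bytes: the hammer emoji alone is 4 bytes.
	fmt.Println(len(msg)) // 26

	// RuneCountInString counts characters, which is what column alignment needs.
	fmt.Println(utf8.RuneCountInString(msg)) // 23
}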
diff --git a/miner/agent.go b/miner/agent.go
index 3c407f20bb..855892a07d 100644
--- a/miner/agent.go
+++ b/miner/agent.go
@@ -17,56 +17,49 @@
package miner
import (
- "fmt"
"sync"
"sync/atomic"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/pow"
)
type CpuAgent struct {
mu sync.Mutex
workCh chan *Work
- quit chan struct{}
+ stop chan struct{}
quitCurrentOp chan struct{}
returnCh chan<- *Result
- index int
- pow pow.PoW
+ chain consensus.ChainReader
+ engine consensus.Engine
isMining int32 // isMining indicates whether the agent is currently mining
}
-func NewCpuAgent(index int, pow pow.PoW) *CpuAgent {
+func NewCpuAgent(chain consensus.ChainReader, engine consensus.Engine) *CpuAgent {
miner := &CpuAgent{
- pow: pow,
- index: index,
- quit: make(chan struct{}),
+ chain: chain,
+ engine: engine,
+ stop: make(chan struct{}, 1),
workCh: make(chan *Work, 1),
}
-
return miner
}
func (self *CpuAgent) Work() chan<- *Work { return self.workCh }
-func (self *CpuAgent) Pow() pow.PoW { return self.pow }
func (self *CpuAgent) SetReturnCh(ch chan<- *Result) { self.returnCh = ch }
func (self *CpuAgent) Stop() {
- close(self.quit)
+ self.stop <- struct{}{}
}
func (self *CpuAgent) Start() {
-
if !atomic.CompareAndSwapInt32(&self.isMining, 0, 1) {
return // agent already started
}
-
go self.update()
}
@@ -82,7 +75,7 @@ out:
self.quitCurrentOp = make(chan struct{})
go self.mine(work, self.quitCurrentOp)
self.mu.Unlock()
- case <-self.quit:
+ case <-self.stop:
self.mu.Lock()
if self.quitCurrentOp != nil {
close(self.quitCurrentOp)
@@ -99,27 +92,27 @@ done:
select {
case <-self.workCh:
default:
- close(self.workCh)
break done
}
}
-
atomic.StoreInt32(&self.isMining, 0)
}
func (self *CpuAgent) mine(work *Work, stop <-chan struct{}) {
- log.Debug(fmt.Sprintf("(re)started agent[%d]. mining...\n", self.index))
-
- // Mine
- nonce, mixDigest := self.pow.Search(work.Block, stop)
- if nonce != 0 {
- block := work.Block.WithMiningResult(types.EncodeNonce(nonce), common.BytesToHash(mixDigest))
- self.returnCh <- &Result{work, block}
+ if result, err := self.engine.Seal(self.chain, work.Block, stop); result != nil {
+ log.Info("Successfully sealed new block", "number", result.Number(), "hash", result.Hash())
+ self.returnCh <- &Result{work, result}
} else {
+ if err != nil {
+ log.Warn("Block sealing failed", "err", err)
+ }
self.returnCh <- nil
}
}
func (self *CpuAgent) GetHashRate() int64 {
- return int64(self.pow.Hashrate())
+ if pow, ok := self.engine.(consensus.PoW); ok {
+ return int64(pow.Hashrate())
+ }
+ return 0
}
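The agent no longer calls ethash's Search directly; it hands the pending block to whatever consensus.Engine it was given and lets Seal block until a solution is found or the stop channel fires, exactly as mine does above. A hedged sketch of that contract (sealOnce is a hypothetical helper, not part of the patch):

package sketch

import (
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// sealOnce runs a single sealing attempt the way CpuAgent.mine does: Seal
// blocks until it finds a valid seal, fails, or the stop channel is closed.
func sealOnce(chain consensus.ChainReader, engine consensus.Engine, block *types.Block) *types.Block {
	stop := make(chan struct{})
	defer close(stop)

	sealed, err := engine.Seal(chain, block, stop)
	if err != nil {
		log.Warn("Block sealing failed", "err", err)
		return nil
	}
	return sealed // nil if sealing was aborted
}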
diff --git a/miner/miner.go b/miner/miner.go
index dc0591b9a0..453fff04d9 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -32,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
)
// Backend wraps all methods required for mining.
@@ -49,24 +49,24 @@ type Miner struct {
worker *worker
- threads int
coinbase common.Address
mining int32
eth Backend
- pow pow.PoW
+ engine consensus.Engine
canStart int32 // can start indicates whether we can start the mining operation
shouldStart int32 // should start indicates whether we should start after sync
}
-func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, pow pow.PoW) *Miner {
+func New(eth Backend, config *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine) *Miner {
miner := &Miner{
eth: eth,
mux: mux,
- pow: pow,
- worker: newWorker(config, common.Address{}, eth, mux),
+ engine: engine,
+ worker: newWorker(config, engine, common.Address{}, eth, mux),
canStart: 1,
}
+ miner.Register(NewCpuAgent(eth.BlockChain(), engine))
go miner.update()
return miner
@@ -86,7 +86,7 @@ out:
if self.Mining() {
self.Stop()
atomic.StoreInt32(&self.shouldStart, 1)
- log.Info(fmt.Sprint("Mining operation aborted due to sync operation"))
+ log.Info("Mining aborted due to sync")
}
case downloader.DoneEvent, downloader.FailedEvent:
shouldStart := atomic.LoadInt32(&self.shouldStart) == 1
@@ -94,7 +94,7 @@ out:
atomic.StoreInt32(&self.canStart, 1)
atomic.StoreInt32(&self.shouldStart, 0)
if shouldStart {
- self.Start(self.coinbase, self.threads)
+ self.Start(self.coinbase)
}
// unsubscribe. we're only interested in this event once
events.Unsubscribe()
@@ -116,23 +116,18 @@ func (m *Miner) SetGasPrice(price *big.Int) {
m.worker.setGasPrice(price)
}
-func (self *Miner) Start(coinbase common.Address, threads int) {
+func (self *Miner) Start(coinbase common.Address) {
atomic.StoreInt32(&self.shouldStart, 1)
self.worker.setEtherbase(coinbase)
self.coinbase = coinbase
- self.threads = threads
if atomic.LoadInt32(&self.canStart) == 0 {
- log.Info(fmt.Sprint("Can not start mining operation due to network sync (starts when finished)"))
+ log.Info("Network syncing, will start miner afterwards")
return
}
atomic.StoreInt32(&self.mining, 1)
- for i := 0; i < threads; i++ {
- self.worker.register(NewCpuAgent(i, self.pow))
- }
-
- log.Info(fmt.Sprintf("Starting mining operation (CPU=%d TOT=%d)\n", threads, len(self.worker.agents)))
+ log.Info("Starting mining operation")
self.worker.start()
self.worker.commitNewWork()
}
@@ -159,7 +154,9 @@ func (self *Miner) Mining() bool {
}
func (self *Miner) HashRate() (tot int64) {
- tot += int64(self.pow.Hashrate())
+ if pow, ok := self.engine.(consensus.PoW); ok {
+ tot += int64(pow.Hashrate())
+ }
// do we care this might race? is it worth we're rewriting some
// aspects of the worker/locking up agents so we can get an accurate
// hashrate?
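Since not every engine is proof-of-work, hashrate reporting is now an optional capability: both the miner and the CPU agent type-assert the engine against consensus.PoW and fall back to zero otherwise. The same pattern in isolation (hashrateOf is a hypothetical name):

package sketch

import "github.com/ethereum/go-ethereum/consensus"

// hashrateOf reports the engine's hashrate if it actually does PoW, else 0.
func hashrateOf(engine consensus.Engine) int64 {
	if pow, ok := engine.(consensus.PoW); ok {
		return int64(pow.Hashrate())
	}
	return 0
}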
diff --git a/miner/remote_agent.go b/miner/remote_agent.go
index 08c5fc6f08..bb223ba1bc 100644
--- a/miner/remote_agent.go
+++ b/miner/remote_agent.go
@@ -18,16 +18,16 @@ package miner
import (
"errors"
- "fmt"
"math/big"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/pow"
)
type hashrate struct {
@@ -42,7 +42,8 @@ type RemoteAgent struct {
workCh chan *Work
returnCh chan<- *Result
- pow pow.PoW
+ chain consensus.ChainReader
+ engine consensus.Engine
currentWork *Work
work map[common.Hash]*Work
@@ -52,9 +53,10 @@ type RemoteAgent struct {
running int32 // running indicates whether the agent is active. Call atomically
}
-func NewRemoteAgent(pow pow.PoW) *RemoteAgent {
+func NewRemoteAgent(chain consensus.ChainReader, engine consensus.Engine) *RemoteAgent {
return &RemoteAgent{
- pow: pow,
+ chain: chain,
+ engine: engine,
work: make(map[common.Hash]*Work),
hashrate: make(map[common.Hash]hashrate),
}
@@ -114,7 +116,7 @@ func (a *RemoteAgent) GetWork() ([3]string, error) {
block := a.currentWork.Block
res[0] = block.HashNoNonce().Hex()
- seedHash := pow.EthashSeedHash(block.NumberU64())
+ seedHash := ethash.SeedHash(block.NumberU64())
res[1] = common.BytesToHash(seedHash).Hex()
// Calculate the "target" to be returned to the external miner
n := big.NewInt(1)
@@ -129,8 +131,8 @@ func (a *RemoteAgent) GetWork() ([3]string, error) {
return res, errors.New("No work available yet, don't panic.")
}
-// SubmitWork tries to inject a PoW solution tinto the remote agent, returning
-// whether the solution was acceted or not (not can be both a bad PoW as well as
+// SubmitWork tries to inject a pow solution into the remote agent, returning
+// whether the solution was accepted or not (not can be both a bad pow as well as
// any other error, like no work pending).
func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.Hash) bool {
a.mu.Lock()
@@ -139,15 +141,20 @@ func (a *RemoteAgent) SubmitWork(nonce types.BlockNonce, mixDigest, hash common.
// Make sure the work submitted is present
work := a.work[hash]
if work == nil {
- log.Info(fmt.Sprintf("Work was submitted for %x but no pending work found", hash))
+ log.Info("Work submitted but none pending", "hash", hash)
return false
}
- // Make sure the PoW solutions is indeed valid
- block := work.Block.WithMiningResult(nonce, mixDigest)
- if err := a.pow.Verify(block); err != nil {
- log.Warn(fmt.Sprintf("Invalid PoW submitted for %x: %v", hash, err))
+ // Make sure the Engine solution is indeed valid
+ result := work.Block.Header()
+ result.Nonce = nonce
+ result.MixDigest = mixDigest
+
+ if err := a.engine.VerifySeal(a.chain, result); err != nil {
+ log.Warn("Invalid proof-of-work submitted", "hash", hash, "err", err)
return false
}
+ block := work.Block.WithSeal(result)
+
// Solutions seems to be valid, return to the miner and notify acceptance
a.returnCh <- &Result{work, block}
delete(a.work, hash)
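Solution verification now goes through the engine as well: the submitted nonce and mix digest are grafted onto a copy of the pending header, the engine's VerifySeal judges it, and only then is the block reassembled with WithSeal. Condensed into a hypothetical helper (error logging shortened):

package sketch

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// acceptSolution mirrors RemoteAgent.SubmitWork: Header() hands back a copy,
// so mutating it leaves the pending block untouched until WithSeal.
func acceptSolution(chain consensus.ChainReader, engine consensus.Engine,
	pending *types.Block, nonce types.BlockNonce, mixDigest common.Hash) (*types.Block, bool) {

	header := pending.Header()
	header.Nonce = nonce
	header.MixDigest = mixDigest

	if err := engine.VerifySeal(chain, header); err != nil {
		log.Warn("Invalid proof-of-work submitted", "err", err)
		return nil, false
	}
	return pending.WithSeal(header), true
}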
diff --git a/miner/unconfirmed.go b/miner/unconfirmed.go
index bb7d0ff267..ee52d8512f 100644
--- a/miner/unconfirmed.go
+++ b/miner/unconfirmed.go
@@ -18,7 +18,6 @@ package miner
import (
"container/ring"
- "fmt"
"sync"
"github.com/ethereum/go-ethereum/common"
@@ -80,7 +79,7 @@ func (set *unconfirmedBlocks) Insert(index uint64, hash common.Hash) {
set.blocks.Move(-1).Link(item)
}
// Display a log for the user to notify of a new mined block unconfirmed
- log.Info(fmt.Sprintf("🔨 mined potential block #%d [%x…], waiting for %d blocks to confirm", index, hash.Bytes()[:4], set.depth))
+ log.Info("🔨 mined potential block", "number", index, "hash", hash)
}
// Shift drops all unconfirmed blocks from the set which exceed the unconfirmed sets depth
@@ -100,11 +99,11 @@ func (set *unconfirmedBlocks) Shift(height uint64) {
header := set.chain.GetHeaderByNumber(next.index)
switch {
case header == nil:
- log.Warn(fmt.Sprintf("failed to retrieve header of mined block #%d [%x…]", next.index, next.hash.Bytes()[:4]))
+ log.Warn("Failed to retrieve header of mined block", "number", next.index, "hash", next.hash)
case header.Hash() == next.hash:
- log.Info(fmt.Sprintf("🔗 mined block #%d [%x…] reached canonical chain", next.index, next.hash.Bytes()[:4]))
+ log.Info("🔗 block reached canonical chain", "number", next.index, "hash", next.hash)
default:
- log.Info(fmt.Sprintf("⑂ mined block #%d [%x…] became a side fork", next.index, next.hash.Bytes()[:4]))
+ log.Info("⑂ block became a side fork", "number", next.index, "hash", next.hash)
}
// Drop the block out of the ring
if set.blocks.Value == set.blocks.Next().Value {
diff --git a/miner/worker.go b/miner/worker.go
index 2f090924e2..347de4e08a 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -26,6 +26,8 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,7 +36,6 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"gopkg.in/fatih/set.v0"
)
@@ -84,6 +85,7 @@ type Result struct {
// worker is the main object which takes care of applying messages to the new state
type worker struct {
config *params.ChainConfig
+ engine consensus.Engine
mu sync.Mutex
@@ -94,7 +96,6 @@ type worker struct {
agents map[Agent]struct{}
recv chan *Result
- pow pow.PoW
eth Backend
chain *core.BlockChain
@@ -123,9 +124,10 @@ type worker struct {
fullValidation bool
}
-func newWorker(config *params.ChainConfig, coinbase common.Address, eth Backend, mux *event.TypeMux) *worker {
+func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase common.Address, eth Backend, mux *event.TypeMux) *worker {
worker := &worker{
config: config,
+ engine: engine,
eth: eth,
mux: mux,
chainDb: eth.ChainDb(),
@@ -209,16 +211,10 @@ func (self *worker) stop() {
self.mu.Lock()
defer self.mu.Unlock()
if atomic.LoadInt32(&self.mining) == 1 {
- // Stop all agents.
for agent := range self.agents {
agent.Stop()
- // Remove CPU agents.
- if _, ok := agent.(*CpuAgent); ok {
- delete(self.agents, agent)
- }
}
}
-
atomic.StoreInt32(&self.mining, 0)
atomic.StoreInt32(&self.atWork, 0)
}
@@ -277,7 +273,7 @@ func (self *worker) wait() {
if self.fullValidation {
if _, err := self.chain.InsertChain(types.Blocks{block}); err != nil {
- log.Error(fmt.Sprint("mining err", err))
+ log.Error("Mined invalid block", "err", err)
continue
}
go self.mux.Post(core.NewMinedBlockEvent{Block: block})
@@ -285,19 +281,16 @@ func (self *worker) wait() {
work.state.Commit(self.config.IsEIP158(block.Number()))
parent := self.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
- log.Error(fmt.Sprint("Invalid block found during mining"))
+ log.Error("Invalid block found during mining")
continue
}
-
- auxValidator := self.eth.BlockChain().AuxValidator()
- if err := core.ValidateHeader(self.config, auxValidator, block.Header(), parent.Header(), true, false); err != nil && err != core.BlockFutureErr {
- log.Error(fmt.Sprint("Invalid header on mined block:", err))
+ if err := self.engine.VerifyHeader(self.chain, block.Header(), false); err != nil {
+ log.Error("Invalid header on mined block", "err", err)
continue
}
-
stat, err := self.chain.WriteBlock(block)
if err != nil {
- log.Error(fmt.Sprint("error writing block to chain", err))
+ log.Error("Failed writing block to chain", "err", err)
continue
}
@@ -333,7 +326,7 @@ func (self *worker) wait() {
self.mux.Post(logs)
}
if err := core.WriteBlockReceipts(self.chainDb, block.Hash(), block.NumberU64(), receipts); err != nil {
- log.Warn(fmt.Sprint("error writing block receipts:", err))
+ log.Warn("Failed writing block receipts", "err", err)
}
}(block, work.state.Logs(), work.receipts)
}
@@ -424,9 +417,9 @@ func (self *worker) commitNewWork() {
tstamp = parent.Time().Int64() + 1
}
// this will ensure we're not going off too far in the future
- if now := time.Now().Unix(); tstamp > now+4 {
+ if now := time.Now().Unix(); tstamp > now+1 {
wait := time.Duration(tstamp-now) * time.Second
- log.Info(fmt.Sprint("We are too far in the future. Waiting for", wait))
+ log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait))
time.Sleep(wait)
}
@@ -434,13 +427,19 @@ func (self *worker) commitNewWork() {
header := &types.Header{
ParentHash: parent.Hash(),
Number: num.Add(num, common.Big1),
- Difficulty: core.CalcDifficulty(self.config, uint64(tstamp), parent.Time().Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: core.CalcGasLimit(parent),
GasUsed: new(big.Int),
- Coinbase: self.coinbase,
Extra: self.extra,
Time: big.NewInt(tstamp),
}
+ // Only set the coinbase if we are mining (avoid spurious block rewards)
+ if atomic.LoadInt32(&self.mining) == 1 {
+ header.Coinbase = self.coinbase
+ }
+ if err := self.engine.Prepare(self.chain, header); err != nil {
+ log.Error("Failed to prepare header for mining", "err", err)
+ return
+ }
// If we are care about TheDAO hard-fork check whether to override the extra-data or not
if daoBlock := self.config.DAOForkBlock; daoBlock != nil {
// Check whether the block is among the fork extra-override range
@@ -457,21 +456,19 @@ func (self *worker) commitNewWork() {
// Could potentially happen if starting to mine in an odd state.
err := self.makeCurrent(parent, header)
if err != nil {
- log.Info(fmt.Sprint("Could not create new env for mining, retrying on next block."))
+ log.Error("Failed to create mining context", "err", err)
return
}
// Create the current work task and check any fork transitions needed
work := self.current
if self.config.DAOForkSupport && self.config.DAOForkBlock != nil && self.config.DAOForkBlock.Cmp(header.Number) == 0 {
- core.ApplyDAOHardFork(work.state)
+ misc.ApplyDAOHardFork(work.state)
}
-
pending, err := self.eth.TxPool().Pending()
if err != nil {
- log.Error(fmt.Sprintf("Could not fetch pending transactions: %v", err))
+ log.Error("Failed to fetch pending transactions", "err", err)
return
}
-
txs := types.NewTransactionsByPriceAndNonce(pending)
work.commitTransactions(self.mux, txs, self.gasPrice, self.chain)
@@ -488,31 +485,26 @@ func (self *worker) commitNewWork() {
break
}
if err := self.commitUncle(work, uncle.Header()); err != nil {
- log.Trace(fmt.Sprintf("Bad uncle found and will be removed (%x)\n", hash[:4]))
+ log.Trace("Bad uncle found and will be removed", "hash", hash)
log.Trace(fmt.Sprint(uncle))
badUncles = append(badUncles, hash)
} else {
- log.Debug(fmt.Sprintf("committing %x as uncle\n", hash[:4]))
+ log.Debug("Committing new uncle to block", "hash", hash)
uncles = append(uncles, uncle.Header())
}
}
for _, hash := range badUncles {
delete(self.possibleUncles, hash)
}
-
- if atomic.LoadInt32(&self.mining) == 1 {
- // commit state root after all state transitions.
- core.AccumulateRewards(work.state, header, uncles)
- header.Root = work.state.IntermediateRoot(self.config.IsEIP158(header.Number))
+ // Create the new block to seal with the consensus engine
+ if work.Block, err = self.engine.Finalize(self.chain, header, work.state, work.txs, uncles, work.receipts); err != nil {
+ log.Error("Failed to finalize block for sealing", "err", err)
+ return
}
-
- // create the new block whose nonce will be mined.
- work.Block = types.NewBlock(header, work.txs, uncles, work.receipts)
-
// We only care about logging if we're actually mining.
if atomic.LoadInt32(&self.mining) == 1 {
- log.Info(fmt.Sprintf("commit new work on block %v with %d txs & %d uncles. Took %v\n", work.Block.Number(), work.tcount, len(uncles), time.Since(tstart)))
+ log.Info("Commit new mining work", "number", work.Block.Number(), "txs", work.tcount, "uncles", len(uncles), "elapsed", common.PrettyDuration(time.Since(tstart)))
self.unconfirmed.Shift(work.Block.NumberU64() - 1)
}
self.push(work)
@@ -521,13 +513,13 @@ func (self *worker) commitNewWork() {
func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
hash := uncle.Hash()
if work.uncles.Has(hash) {
- return core.UncleError("Uncle not unique")
+ return core.UncleError("uncle not unique")
}
if !work.ancestors.Has(uncle.ParentHash) {
- return core.UncleError(fmt.Sprintf("Uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
+ return core.UncleError(fmt.Sprintf("uncle's parent unknown (%x)", uncle.ParentHash[0:4]))
}
if work.family.Has(hash) {
- return core.UncleError(fmt.Sprintf("Uncle already in family (%x)", hash))
+ return core.UncleError(fmt.Sprintf("uncle already in family (%x)", hash))
}
work.uncles.Add(uncle.Hash())
return nil
@@ -552,7 +544,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// Check whether the tx is replay protected. If we're not in the EIP155 hf
// phase, start ignoring the sender until we do.
if tx.Protected() && !env.config.IsEIP155(env.header.Number) {
- log.Trace(fmt.Sprintf("Transaction (%x) is replay protected, but we haven't yet hardforked. Transaction will be ignored until we hardfork.\n", tx.Hash()))
+ log.Trace("Ignoring replay protected transaction", "hash", tx.Hash(), "eip155", env.config.EIP155Block)
txs.Pop()
continue
@@ -561,7 +553,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
// Ignore any transactions (and accounts subsequently) with low gas limits
if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
// Pop the current low-priced transaction without shifting in the next from the account
- log.Info(fmt.Sprintf("Transaction (%x) below gas price (tx=%dwei ask=%dwei). All sequential txs from this address(%x) will be ignored\n", tx.Hash().Bytes()[:4], tx.GasPrice(), gasPrice, from[:4]))
+ log.Warn("Transaction below gas price", "sender", from, "hash", tx.Hash(), "have", tx.GasPrice(), "want", gasPrice)
env.lowGasTxs = append(env.lowGasTxs, tx)
txs.Pop()
@@ -575,12 +567,12 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
switch {
case core.IsGasLimitErr(err):
// Pop the current out-of-gas transaction without shifting in the next from the account
- log.Trace(fmt.Sprintf("Gas limit reached for (%x) in this block. Continue to try smaller txs\n", from[:4]))
+ log.Trace("Gas limit exceeded for current block", "sender", from)
txs.Pop()
case err != nil:
// Pop the current failed transaction without shifting in the next from the account
- log.Trace(fmt.Sprintf("Transaction (%x) failed, will be removed: %v\n", tx.Hash().Bytes()[:4], err))
+ log.Trace("Transaction failed, will be removed", "hash", tx.Hash(), "err", err)
env.failedTxs = append(env.failedTxs, tx)
txs.Pop()
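Taken together, the worker now owns none of the consensus logic: Prepare fills in the engine-specific header fields (difficulty used to be computed here via core.CalcDifficulty), Finalize applies block rewards and derives the state root (replacing core.AccumulateRewards plus IntermediateRoot), and the agents' Seal call completes the block. A hedged sketch of that lifecycle with transaction and uncle handling elided (assembleBlock is a hypothetical helper):

package sketch

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

func assembleBlock(chain consensus.ChainReader, engine consensus.Engine,
	statedb *state.StateDB, parent *types.Block, coinbase common.Address) (*types.Block, error) {

	header := &types.Header{
		ParentHash: parent.Hash(),
		Number:     new(big.Int).Add(parent.Number(), common.Big1),
		GasLimit:   core.CalcGasLimit(parent),
		GasUsed:    new(big.Int),
		Coinbase:   coinbase,
		Time:       new(big.Int).Add(parent.Time(), common.Big1),
	}
	// Prepare lets the engine initialize its consensus fields, e.g. the difficulty.
	if err := engine.Prepare(chain, header); err != nil {
		return nil, err
	}

	// ... apply pending transactions against statedb, collect txs/uncles/receipts ...

	// Finalize accumulates block rewards, computes the state root and assembles the block.
	return engine.Finalize(chain, header, statedb, nil, nil, nil)
}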
diff --git a/params/util.go b/params/util.go
index 546ebb35c3..bf833d5107 100644
--- a/params/util.go
+++ b/params/util.go
@@ -38,6 +38,6 @@ var (
TestNetSpuriousDragon = big.NewInt(10)
MainNetSpuriousDragon = big.NewInt(2675000)
- TestNetChainID = big.NewInt(3) // Test net default chain ID
- MainNetChainID = big.NewInt(1) // main net default chain ID
+ TestNetChainID = big.NewInt(3) // Testnet default chain ID
+ MainNetChainID = big.NewInt(1) // Mainnet default chain ID
)
diff --git a/pow/pow.go b/pow/pow.go
deleted file mode 100644
index 4849adb3ea..0000000000
--- a/pow/pow.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package pow
-
-import (
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
-)
-
-type Block interface {
- Difficulty() *big.Int
- HashNoNonce() common.Hash
- Nonce() uint64
- MixDigest() common.Hash
- NumberU64() uint64
-}
-
-type ChainManager interface {
- GetBlockByNumber(uint64) *types.Block
- CurrentBlock() *types.Block
-}
-
-type PoW interface {
- Verify(block Block) error
- Search(block Block, stop <-chan struct{}) (uint64, []byte)
- Hashrate() float64
-}
-
-// FakePow is a non-validating proof of work implementation.
-// It returns true from Verify for any block.
-type FakePow struct{}
-
-// Verify implements PoW, returning a success for an input.
-func (pow FakePow) Verify(block Block) error { return nil }
-
-// Search implements PoW, returning the nonce 0 for any call.
-func (pow FakePow) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
- return 0, nil
-}
-
-// Hashrate implements PoW, returning 0.
-func (pow FakePow) Hashrate() float64 { return 0 }
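What the deleted package provided is now split between consensus (the Engine and PoW interfaces used above) and consensus/ethash, whose test constructors stand in for FakePow and NewSharedEthash throughout the updated tests. A short, hedged summary of the replacements (descriptions inferred from how the diffs use them):

package sketch

import "github.com/ethereum/go-ethereum/consensus/ethash"

var (
	// Accepts any seal, but headers must still follow the normal consensus rules.
	faker = ethash.NewFaker()
	// Accepts everything, headers and seals alike; used where validation is irrelevant.
	fullFaker = ethash.NewFullFaker()
	// A real, full-size ethash instance shared between tests (was pow.NewSharedEthash).
	shared = ethash.NewShared()
)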
diff --git a/tests/block_test_util.go b/tests/block_test_util.go
index 456a4ccb86..b9678a77bd 100644
--- a/tests/block_test_util.go
+++ b/tests/block_test_util.go
@@ -27,6 +27,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
@@ -35,7 +36,6 @@ import (
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/pow"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -172,7 +172,7 @@ func runBlockTest(homesteadBlock, daoForkBlock, gasPriceFork *big.Int, test *Blo
core.WriteHeadBlockHash(db, test.Genesis.Hash())
evmux := new(event.TypeMux)
config := &params.ChainConfig{HomesteadBlock: homesteadBlock, DAOForkBlock: daoForkBlock, DAOForkSupport: true, EIP150Block: gasPriceFork}
- chain, err := core.NewBlockChain(db, config, pow.NewSharedEthash(), evmux, vm.Config{})
+ chain, err := core.NewBlockChain(db, config, ethash.NewShared(), evmux, vm.Config{})
if err != nil {
return err
}
diff --git a/trie/iterator.go b/trie/iterator.go
index 234c49ecc8..ddc674d2bc 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -18,6 +18,7 @@ package trie
import (
"bytes"
+
"github.com/ethereum/go-ethereum/common"
)