all: nuke total difficulty (#30744)

The total difficulty is the sum of all block difficulties from genesis
to a certain block. This value was used in PoW for deciding which chain
is heavier, and thus which chain to select. Since PoS has a different
fork selection algorithm, all blocks since the merge have a difficulty
of 0, and all total difficulties are the same for the past 2 years.

Whilst the TDs are mostly useless nowadays, there was never really a
reason to mess around removing them since they are so tiny. This
reasoning changes when we go down the path of pruned chain history. In
order to reconstruct any TD, we **must** retrieve all the headers from
chain head to genesis and then iterate all the difficulties to compute
the TD.

In a world where we completely prune past chain segments (bodies,
receipts, headers), it is not possible to reconstruct the TD at all. In
a world where we still keep chain headers and prune only the rest,
reconstructing it is possible as long as we process (or download) the
chain forward from genesis, but trying to snap sync the head first and
backfill later hits the same issue: the TD becomes impossible to
calculate until genesis is backfilled.

All in all, the TD is a messy out-of-state, out-of-consensus computed
field that is overall useless nowadays, but code relying on it forces
the client into certain modes of operation and prevents other modes or
other optimizations. This PR completely nukes out the TD from the node.
It doesn't compute it, it doesn't operate on it, it's as if it didn't
even exist.

Caveats:

- Whenever we have APIs that return TD (devp2p handshake, tracer, etc.)
we return a TD of 0.
- For era files, we recompute the TD during export time (fairly quick)
to retain the format content.
- It is not possible to "verify" the merge point (i.e. with TD gone, TTD
is useless). Since we no longer verify PoW and just blindly trust it,
not verifying but blindly trusting the many-year-old merge point amounts
to the same trust model.
- Our tests still need to be able to generate pre and post merge blocks,
so they need a new way to split the merge without TTD. The PR introduces
a settable ttdBlock field on the consensus object which is used by tests
as the block where originally the TTD happened. This is not needed for
live nodes, we never want to generate old blocks.
- One merge transition consensus test was disabled. With a
non-operational TD, testing how the client reacts to TTD is useless, it
cannot react.

Questions:

- Should we also drop the terminal total difficulty from the genesis
json? It's a number we cannot act on any more, so maybe it would be
cleaner to get rid of even more concepts.

---------

Co-authored-by: Gary Rong <garyrong0905@gmail.com>
pull/31084/head
Péter Szilágyi 2 days ago committed by GitHub
parent 9516e0f6b6
commit 39638c81c5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
  1. 6
      cmd/devp2p/internal/ethtest/chain.go
  2. 38
      cmd/devp2p/internal/ethtest/suite.go
  3. 12
      cmd/utils/cmd.go
  4. 129
      consensus/beacon/consensus.go
  5. 41
      consensus/beacon/faker.go
  6. 3
      consensus/consensus.go
  7. 1
      core/bench_test.go
  8. 60
      core/blockchain.go
  9. 7
      core/blockchain_reader.go
  10. 91
      core/blockchain_test.go
  11. 4
      core/chain_makers.go
  12. 5
      core/chain_makers_test.go
  13. 1
      core/genesis.go
  14. 9
      core/genesis_test.go
  15. 37
      core/headerchain.go
  16. 4
      core/headerchain_test.go
  17. 68
      core/rawdb/accessors_chain.go
  18. 40
      core/rawdb/accessors_chain_test.go
  19. 12
      core/rawdb/ancient_scheme.go
  20. 8
      core/rawdb/chain_freezer.go
  21. 2
      core/rawdb/database.go
  22. 7
      core/rawdb/schema.go
  23. 1
      core/tracing/hooks.go
  24. 2
      core/txindexer_test.go
  25. 7
      eth/api_backend.go
  26. 34
      eth/catalyst/api.go
  27. 91
      eth/catalyst/api_test.go
  28. 4
      eth/downloader/downloader.go
  29. 7
      eth/downloader/downloader_test.go
  30. 2
      eth/downloader/peer.go
  31. 6
      eth/gasprice/gasprice_test.go
  32. 3
      eth/handler.go
  33. 6
      eth/handler_eth_test.go
  34. 19
      eth/protocols/eth/handler.go
  35. 13
      eth/protocols/eth/handler_test.go
  36. 11
      eth/protocols/eth/handshake.go
  37. 12
      eth/protocols/eth/handshake_test.go
  38. 24
      eth/protocols/eth/peer.go
  39. 2
      eth/tracers/api_test.go
  40. 5
      eth/tracers/internal/tracetest/supply_test.go
  41. 7
      ethstats/ethstats.go
  42. 5
      graphql/graphql_test.go
  43. 6
      internal/ethapi/api_test.go
  44. 1
      internal/ethapi/transaction_args_test.go
  45. 3
      tests/block_test.go

@ -143,11 +143,7 @@ func (c *Chain) ForkID() forkid.ID {
// TD calculates the total difficulty of the chain at the // TD calculates the total difficulty of the chain at the
// chain head. // chain head.
func (c *Chain) TD() *big.Int { func (c *Chain) TD() *big.Int {
sum := new(big.Int) return new(big.Int)
for _, block := range c.blocks[:c.Len()] {
sum.Add(sum, block.Difficulty())
}
return sum
} }
// GetBlock returns the block at the specified number. // GetBlock returns the block at the specified number.

@ -18,7 +18,6 @@ package ethtest
import ( import (
"crypto/rand" "crypto/rand"
"math/big"
"reflect" "reflect"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -74,7 +73,6 @@ func (s *Suite) EthTests() []utesting.Test {
{Name: "GetBlockBodies", Fn: s.TestGetBlockBodies}, {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
// // malicious handshakes + status // // malicious handshakes + status
{Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake},
{Name: "MaliciousStatus", Fn: s.TestMaliciousStatus},
// test transactions // test transactions
{Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true}, {Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
{Name: "Transaction", Fn: s.TestTransaction}, {Name: "Transaction", Fn: s.TestTransaction},
@ -453,42 +451,6 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
} }
} }
func (s *Suite) TestMaliciousStatus(t *utesting.T) {
t.Log(`This test sends a malicious eth Status message to the node and expects a disconnect.`)
conn, err := s.dial()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := conn.handshake(); err != nil {
t.Fatalf("handshake failed: %v", err)
}
// Create status with large total difficulty.
status := &eth.StatusPacket{
ProtocolVersion: uint32(conn.negotiatedProtoVersion),
NetworkID: s.chain.config.ChainID.Uint64(),
TD: new(big.Int).SetBytes(randBuf(2048)),
Head: s.chain.Head().Hash(),
Genesis: s.chain.GetBlock(0).Hash(),
ForkID: s.chain.ForkID(),
}
if err := conn.statusExchange(s.chain, status); err != nil {
t.Fatalf("status exchange failed: %v", err)
}
// Wait for disconnect.
code, _, err := conn.Read()
if err != nil {
t.Fatalf("error reading from connection: %v", err)
}
switch code {
case discMsg:
break
default:
t.Fatalf("expected disconnect, got: %d", code)
}
}
func (s *Suite) TestTransaction(t *utesting.T) { func (s *Suite) TestTransaction(t *utesting.T) {
t.Log(`This test sends a valid transaction to the node and checks if the t.Log(`This test sends a valid transaction to the node and checks if the
transaction gets propagated.`) transaction gets propagated.`)

@ -25,6 +25,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/big"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
@ -422,6 +423,10 @@ func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) er
buf = bytes.NewBuffer(nil) buf = bytes.NewBuffer(nil)
checksums []string checksums []string
) )
td := new(big.Int)
for i := uint64(0); i < first; i++ {
td.Add(td, bc.GetHeaderByNumber(i).Difficulty)
}
for i := first; i <= last; i += step { for i := first; i <= last; i += step {
err := func() error { err := func() error {
filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{})) filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
@ -444,11 +449,8 @@ func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) er
if receipts == nil { if receipts == nil {
return fmt.Errorf("export failed on #%d: receipts not found", n) return fmt.Errorf("export failed on #%d: receipts not found", n)
} }
td := bc.GetTd(block.Hash(), block.NumberU64()) td.Add(td, block.Difficulty())
if td == nil { if err := w.Add(block, receipts, new(big.Int).Set(td)); err != nil {
return fmt.Errorf("export failed on #%d: total difficulty not found", n)
}
if err := w.Add(block, receipts, td); err != nil {
return err return err
} }
} }

@ -61,7 +61,8 @@ var (
// is only used for necessary consensus checks. The legacy consensus engine can be any // is only used for necessary consensus checks. The legacy consensus engine can be any
// engine implements the consensus interface (except the beacon itself). // engine implements the consensus interface (except the beacon itself).
type Beacon struct { type Beacon struct {
ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique ethone consensus.Engine // Original consensus engine used in eth1, e.g. ethash or clique
ttdblock *uint64 // Merge block-number for testchain generation without TTDs
} }
// New creates a consensus engine with the given embedded eth1 engine. // New creates a consensus engine with the given embedded eth1 engine.
@ -72,6 +73,18 @@ func New(ethone consensus.Engine) *Beacon {
return &Beacon{ethone: ethone} return &Beacon{ethone: ethone}
} }
// TestingTTDBlock is a replacement mechanism for TTD-based pre-/post-merge
// splitting. With chain history deletion, TD calculations become impossible.
// This is fine for progressing the live chain, but to be able to generate test
// chains, we do need a split point. This method supports setting an explicit
// block number to use as the splitter *for testing*, instead of having to keep
// the notion of TDs in the client just for testing.
//
// The block with supplied number is regarded as the last pre-merge block.
func (beacon *Beacon) TestingTTDBlock(number uint64) {
beacon.ttdblock = &number
}
// Author implements consensus.Engine, returning the verified author of the block. // Author implements consensus.Engine, returning the verified author of the block.
func (beacon *Beacon) Author(header *types.Header) (common.Address, error) { func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
if !beacon.IsPoSHeader(header) { if !beacon.IsPoSHeader(header) {
@ -83,67 +96,55 @@ func (beacon *Beacon) Author(header *types.Header) (common.Address, error) {
// VerifyHeader checks whether a header conforms to the consensus rules of the // VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum consensus engine. // stock Ethereum consensus engine.
func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error {
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) // During the live merge transition, the consensus engine used the terminal
if err != nil { // total difficulty to detect when PoW (PoA) switched to PoS. Maintaining the
return err // total difficulty values however require applying all the blocks from the
} // genesis to build up the TD. This stops being a possibility if the tail of
if !reached { // the chain is pruned already during sync.
return beacon.ethone.VerifyHeader(chain, header) //
} // One heuristic that can be used to distinguish pre-merge and post-merge
// Short circuit if the parent is not known // blocks is whether their *difficulty* is >0 or ==0 respectively. This of
// course would mean that we cannot prove anymore for a past chain that it
// truly transitioned at the correct TTD, but if we consider that ancient
// point in time finalized a long time ago, there should be no attempt from
// the consensus client to rewrite very old history.
//
// One thing that's probably not needed but which we can add to make this
// verification even stricter is to enforce that the chain can switch from
// >0 to ==0 TD only once by forbidding an ==0 to be followed by a >0.
// Verify that we're not reverting to pre-merge from post-merge
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil { if parent == nil {
return consensus.ErrUnknownAncestor return consensus.ErrUnknownAncestor
} }
// Sanity checks passed, do a proper verification if parent.Difficulty.Sign() == 0 && header.Difficulty.Sign() > 0 {
return beacon.verifyHeader(chain, header, parent) return consensus.ErrInvalidTerminalBlock
} }
// Check >0 TDs with pre-merge, --0 TDs with post-merge rules
// errOut constructs an error channel with prefilled errors inside. if header.Difficulty.Sign() > 0 {
func errOut(n int, err error) chan error { return beacon.ethone.VerifyHeader(chain, header)
errs := make(chan error, n)
for i := 0; i < n; i++ {
errs <- err
} }
return errs return beacon.verifyHeader(chain, header, parent)
} }
// splitHeaders splits the provided header batch into two parts according to // splitHeaders splits the provided header batch into two parts according to
// the configured ttd. It requires the parent of header batch along with its // the difficulty field.
// td are stored correctly in chain. If ttd is not configured yet, all headers //
// will be treated legacy PoW headers.
// Note, this function will not verify the header validity but just split them. // Note, this function will not verify the header validity but just split them.
func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header, error) { func (beacon *Beacon) splitHeaders(headers []*types.Header) ([]*types.Header, []*types.Header) {
// TTD is not defined yet, all headers should be in legacy format.
ttd := chain.Config().TerminalTotalDifficulty
ptd := chain.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
if ptd == nil {
return nil, nil, consensus.ErrUnknownAncestor
}
// The entire header batch already crosses the transition.
if ptd.Cmp(ttd) >= 0 {
return nil, headers, nil
}
var ( var (
preHeaders = headers preHeaders = headers
postHeaders []*types.Header postHeaders []*types.Header
td = new(big.Int).Set(ptd)
tdPassed bool
) )
for i, header := range headers { for i, header := range headers {
if tdPassed { if header.Difficulty.Sign() == 0 {
preHeaders = headers[:i] preHeaders = headers[:i]
postHeaders = headers[i:] postHeaders = headers[i:]
break break
} }
td = td.Add(td, header.Difficulty)
if td.Cmp(ttd) >= 0 {
// This is the last PoW header, it still belongs to
// the preHeaders, so we cannot split+break yet.
tdPassed = true
}
} }
return preHeaders, postHeaders, nil return preHeaders, postHeaders
} }
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
@ -151,10 +152,7 @@ func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []
// a results channel to retrieve the async verifications. // a results channel to retrieve the async verifications.
// VerifyHeaders expect the headers to be ordered and continuous. // VerifyHeaders expect the headers to be ordered and continuous.
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
preHeaders, postHeaders, err := beacon.splitHeaders(chain, headers) preHeaders, postHeaders := beacon.splitHeaders(headers)
if err != nil {
return make(chan struct{}), errOut(len(headers), err)
}
if len(postHeaders) == 0 { if len(postHeaders) == 0 {
return beacon.ethone.VerifyHeaders(chain, headers) return beacon.ethone.VerifyHeaders(chain, headers)
} }
@ -334,12 +332,15 @@ func (beacon *Beacon) verifyHeaders(chain consensus.ChainHeaderReader, headers [
// Prepare implements consensus.Engine, initializing the difficulty field of a // Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the beacon protocol. The changes are done inline. // header to conform to the beacon protocol. The changes are done inline.
func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { func (beacon *Beacon) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
// Transition isn't triggered yet, use the legacy rules for preparation. // The beacon engine requires access to total difficulties to be able to
reached, err := IsTTDReached(chain, header.ParentHash, header.Number.Uint64()-1) // seal pre-merge and post-merge blocks. With the transition to removing
if err != nil { // old blocks, TDs become unaccessible, thus making TTD based pre-/post-
return err // merge decisions impossible.
} //
if !reached { // We do not need to seal non-merge blocks anymore live, but we do need
// to be able to generate test chains, thus we're reverting to a testing-
// settable field to direct that.
if beacon.ttdblock != nil && *beacon.ttdblock >= header.Number.Uint64() {
return beacon.ethone.Prepare(chain, header) return beacon.ethone.Prepare(chain, header)
} }
header.Difficulty = beaconDifficulty header.Difficulty = beaconDifficulty
@ -449,8 +450,15 @@ func (beacon *Beacon) SealHash(header *types.Header) common.Hash {
// the difficulty that a new block should have when created at time // the difficulty that a new block should have when created at time
// given the parent block's time and difficulty. // given the parent block's time and difficulty.
func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
// Transition isn't triggered yet, use the legacy rules for calculation // The beacon engine requires access to total difficulties to be able to
if reached, _ := IsTTDReached(chain, parent.Hash(), parent.Number.Uint64()); !reached { // seal pre-merge and post-merge blocks. With the transition to removing
// old blocks, TDs become unaccessible, thus making TTD based pre-/post-
// merge decisions impossible.
//
// We do not need to seal non-merge blocks anymore live, but we do need
// to be able to generate test chains, thus we're reverting to a testing-
// settable field to direct that.
if beacon.ttdblock != nil && *beacon.ttdblock > parent.Number.Uint64() {
return beacon.ethone.CalcDifficulty(chain, time, parent) return beacon.ethone.CalcDifficulty(chain, time, parent)
} }
return beaconDifficulty return beaconDifficulty
@ -491,14 +499,3 @@ func (beacon *Beacon) SetThreads(threads int) {
th.SetThreads(threads) th.SetThreads(threads)
} }
} }
// IsTTDReached checks if the TotalTerminalDifficulty has been surpassed on the `parentHash` block.
// It depends on the parentHash already being stored in the database.
// If the parentHash is not stored in the database a UnknownAncestor error is returned.
func IsTTDReached(chain consensus.ChainHeaderReader, parentHash common.Hash, parentNumber uint64) (bool, error) {
td := chain.GetTd(parentHash, parentNumber)
if td == nil {
return false, consensus.ErrUnknownAncestor
}
return td.Cmp(chain.Config().TerminalTotalDifficulty) >= 0, nil
}

@ -1,41 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon
import (
"math/big"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
)
// NewFaker creates a fake consensus engine for testing.
// The fake engine simulates a merged network.
// It can not be used to test the merge transition.
// This type is needed since the fakeChainReader can not be used with
// a normal beacon consensus engine.
func NewFaker() consensus.Engine {
return new(faker)
}
type faker struct {
Beacon
}
func (f *faker) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return beaconDifficulty
}

@ -45,9 +45,6 @@ type ChainHeaderReader interface {
// GetHeaderByHash retrieves a block header from the database by its hash. // GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header GetHeaderByHash(hash common.Hash) *types.Header
// GetTd retrieves the total difficulty from the database by hash and number.
GetTd(hash common.Hash, number uint64) *big.Int
} }
// ChainReader defines a small collection of methods needed to access the local // ChainReader defines a small collection of methods needed to access the local

@ -285,7 +285,6 @@ func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uin
rawdb.WriteHeader(db, header) rawdb.WriteHeader(db, header)
rawdb.WriteCanonicalHash(db, hash, n) rawdb.WriteCanonicalHash(db, hash, n)
rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1)))
if n == 0 { if n == 0 {
rawdb.WriteChainConfig(db, hash, genesis.Config) rawdb.WriteChainConfig(db, hash, genesis.Config)

@ -127,7 +127,10 @@ const (
// - Version 8 // - Version 8
// The following incompatible database changes were added: // The following incompatible database changes were added:
// * New scheme for contract code in order to separate the codes and trie nodes // * New scheme for contract code in order to separate the codes and trie nodes
BlockChainVersion uint64 = 8 // - Version 9
// Total difficulty has been removed from both the key-value store and the
// ancient store, the td freezer table has been deprecated since that.
BlockChainVersion uint64 = 9
) )
// CacheConfig contains the configuration values for the trie database // CacheConfig contains the configuration values for the trie database
@ -543,21 +546,16 @@ func (bc *BlockChain) loadLastState() error {
var ( var (
currentSnapBlock = bc.CurrentSnapBlock() currentSnapBlock = bc.CurrentSnapBlock()
currentFinalBlock = bc.CurrentFinalBlock() currentFinalBlock = bc.CurrentFinalBlock()
headerTd = bc.GetTd(headHeader.Hash(), headHeader.Number.Uint64())
blockTd = bc.GetTd(headBlock.Hash(), headBlock.NumberU64())
) )
if headHeader.Hash() != headBlock.Hash() { if headHeader.Hash() != headBlock.Hash() {
log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0))) log.Info("Loaded most recent local header", "number", headHeader.Number, "hash", headHeader.Hash(), "age", common.PrettyAge(time.Unix(int64(headHeader.Time), 0)))
} }
log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "td", blockTd, "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0))) log.Info("Loaded most recent local block", "number", headBlock.Number(), "hash", headBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(headBlock.Time()), 0)))
if headBlock.Hash() != currentSnapBlock.Hash() { if headBlock.Hash() != currentSnapBlock.Hash() {
snapTd := bc.GetTd(currentSnapBlock.Hash(), currentSnapBlock.Number.Uint64()) log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
log.Info("Loaded most recent local snap block", "number", currentSnapBlock.Number, "hash", currentSnapBlock.Hash(), "td", snapTd, "age", common.PrettyAge(time.Unix(int64(currentSnapBlock.Time), 0)))
} }
if currentFinalBlock != nil { if currentFinalBlock != nil {
finalTd := bc.GetTd(currentFinalBlock.Hash(), currentFinalBlock.Number.Uint64()) log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
log.Info("Loaded most recent local finalized block", "number", currentFinalBlock.Number, "hash", currentFinalBlock.Hash(), "td", finalTd, "age", common.PrettyAge(time.Unix(int64(currentFinalBlock.Time), 0)))
} }
if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil { if pivot := rawdb.ReadLastPivotNumber(bc.db); pivot != nil {
log.Info("Loaded last snap-sync pivot marker", "number", *pivot) log.Info("Loaded last snap-sync pivot marker", "number", *pivot)
@ -989,7 +987,6 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
// Prepare the genesis block and reinitialise the chain // Prepare the genesis block and reinitialise the chain
batch := bc.db.NewBatch() batch := bc.db.NewBatch()
rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
rawdb.WriteBlock(batch, genesis) rawdb.WriteBlock(batch, genesis)
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to write genesis block", "err", err) log.Crit("Failed to write genesis block", "err", err)
@ -1262,8 +1259,7 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// Ensure genesis is in ancients. // Ensure genesis is in ancients.
if first.NumberU64() == 1 { if first.NumberU64() == 1 {
if frozen, _ := bc.db.Ancients(); frozen == 0 { if frozen, _ := bc.db.Ancients(); frozen == 0 {
td := bc.genesisBlock.Difficulty() writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil})
writeSize, err := rawdb.WriteAncientBlocks(bc.db, []*types.Block{bc.genesisBlock}, []types.Receipts{nil}, td)
if err != nil { if err != nil {
log.Error("Error writing genesis to ancients", "err", err) log.Error("Error writing genesis to ancients", "err", err)
return 0, err return 0, err
@ -1278,10 +1274,8 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
if !bc.HasHeader(last.Hash(), last.NumberU64()) { if !bc.HasHeader(last.Hash(), last.NumberU64()) {
return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4]) return 0, fmt.Errorf("containing header #%d [%x..] unknown", last.Number(), last.Hash().Bytes()[:4])
} }
// Write all chain data to ancients. // Write all chain data to ancients.
td := bc.GetTd(first.Hash(), first.NumberU64()) writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain)
writeSize, err := rawdb.WriteAncientBlocks(bc.db, blockChain, receiptChain, td)
if err != nil { if err != nil {
log.Error("Error importing chain data to ancients", "err", err) log.Error("Error importing chain data to ancients", "err", err)
return 0, err return 0, err
@ -1421,12 +1415,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// writeBlockWithoutState writes only the block and its metadata to the database, // writeBlockWithoutState writes only the block and its metadata to the database,
// but does not write any state. This is used to construct competing side forks // but does not write any state. This is used to construct competing side forks
// up to the point where they exceed the canonical total difficulty. // up to the point where they exceed the canonical total difficulty.
func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (err error) { func (bc *BlockChain) writeBlockWithoutState(block *types.Block) (err error) {
if bc.insertStopped() { if bc.insertStopped() {
return errInsertionInterrupted return errInsertionInterrupted
} }
batch := bc.db.NewBatch() batch := bc.db.NewBatch()
rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
rawdb.WriteBlock(batch, block) rawdb.WriteBlock(batch, block)
if err := batch.Write(); err != nil { if err := batch.Write(); err != nil {
log.Crit("Failed to write block into disk", "err", err) log.Crit("Failed to write block into disk", "err", err)
@ -1450,20 +1443,14 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
// writeBlockWithState writes block, metadata and corresponding state data to the // writeBlockWithState writes block, metadata and corresponding state data to the
// database. // database.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error { func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, statedb *state.StateDB) error {
// Calculate the total difficulty of the block if !bc.HasHeader(block.ParentHash(), block.NumberU64()-1) {
ptd := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
if ptd == nil {
return consensus.ErrUnknownAncestor return consensus.ErrUnknownAncestor
} }
// Make sure no inconsistent state is leaked during insertion
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Irrelevant of the canonical status, write the block itself to the database. // Irrelevant of the canonical status, write the block itself to the database.
// //
// Note all the components of block(td, hash->number map, header, body, receipts) // Note all the components of block(hash->number map, header, body, receipts)
// should be written atomically. BlockBatch is used for containing all components. // should be written atomically. BlockBatch is used for containing all components.
blockBatch := bc.db.NewBatch() blockBatch := bc.db.NewBatch()
rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
rawdb.WriteBlock(blockBatch, block) rawdb.WriteBlock(blockBatch, block)
rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts) rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
rawdb.WritePreimages(blockBatch, statedb.Preimages()) rawdb.WritePreimages(blockBatch, statedb.Preimages())
@ -1757,7 +1744,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness
if bc.logger != nil && bc.logger.OnSkippedBlock != nil { if bc.logger != nil && bc.logger.OnSkippedBlock != nil {
bc.logger.OnSkippedBlock(tracing.BlockEvent{ bc.logger.OnSkippedBlock(tracing.BlockEvent{
Block: block, Block: block,
TD: bc.GetTd(block.ParentHash(), block.NumberU64()-1),
Finalized: bc.CurrentFinalBlock(), Finalized: bc.CurrentFinalBlock(),
Safe: bc.CurrentSafeBlock(), Safe: bc.CurrentSafeBlock(),
}) })
@ -1882,10 +1868,8 @@ type blockProcessingResult struct {
// it writes the block and associated state to database. // it writes the block and associated state to database.
func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool) (_ *blockProcessingResult, blockEndErr error) { func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, start time.Time, setHead bool) (_ *blockProcessingResult, blockEndErr error) {
if bc.logger != nil && bc.logger.OnBlockStart != nil { if bc.logger != nil && bc.logger.OnBlockStart != nil {
td := bc.GetTd(block.ParentHash(), block.NumberU64()-1)
bc.logger.OnBlockStart(tracing.BlockEvent{ bc.logger.OnBlockStart(tracing.BlockEvent{
Block: block, Block: block,
TD: td,
Finalized: bc.CurrentFinalBlock(), Finalized: bc.CurrentFinalBlock(),
Safe: bc.CurrentSafeBlock(), Safe: bc.CurrentSafeBlock(),
}) })
@ -1995,10 +1979,8 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
// switch over to the new chain if the TD exceeded the current chain. // switch over to the new chain if the TD exceeded the current chain.
// insertSideChain is only used pre-merge. // insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, error) { func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, error) {
var ( var current = bc.CurrentBlock()
externTd *big.Int
current = bc.CurrentBlock()
)
// The first sidechain block error is already verified to be ErrPrunedAncestor. // The first sidechain block error is already verified to be ErrPrunedAncestor.
// Since we don't import them here, we expect ErrUnknownAncestor for the remaining // Since we don't import them here, we expect ErrUnknownAncestor for the remaining
// ones. Any other errors means that the block is invalid, and should not be written // ones. Any other errors means that the block is invalid, and should not be written
@ -2010,11 +1992,6 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
canonical := bc.GetBlockByNumber(number) canonical := bc.GetBlockByNumber(number)
if canonical != nil && canonical.Hash() == block.Hash() { if canonical != nil && canonical.Hash() == block.Hash() {
// Not a sidechain block, this is a re-import of a canon block which has it's state pruned // Not a sidechain block, this is a re-import of a canon block which has it's state pruned
// Collect the TD of the block. Since we know it's a canon one,
// we can get it directly, and not (like further below) use
// the parent and then add the block on top
externTd = bc.GetTd(block.Hash(), block.NumberU64())
continue continue
} }
if canonical != nil && canonical.Root() == block.Root() { if canonical != nil && canonical.Root() == block.Root() {
@ -2033,14 +2010,9 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, ma
return nil, it.index, errors.New("sidechain ghost-state attack") return nil, it.index, errors.New("sidechain ghost-state attack")
} }
} }
if externTd == nil {
externTd = bc.GetTd(block.ParentHash(), block.NumberU64()-1)
}
externTd = new(big.Int).Add(externTd, block.Difficulty())
if !bc.HasBlock(block.Hash(), block.NumberU64()) { if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now() start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil { if err := bc.writeBlockWithoutState(block); err != nil {
return nil, it.index, err return nil, it.index, err
} }
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(), log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),

@ -18,7 +18,6 @@ package core
import ( import (
"errors" "errors"
"math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus"
@ -307,12 +306,6 @@ func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLoo
return lookup, tx, nil return lookup, tx, nil
} }
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return bc.hc.GetTd(hash, number)
}
// HasState checks if state trie is fully present in the database or not. // HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool { func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.statedb.OpenTrie(hash) _, err := bc.statedb.OpenTrie(hash)

@ -88,7 +88,7 @@ func newGwei(n int64) *big.Int {
} }
// Test fork of length N starting from block i // Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string) { func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) {
// Copy old chain up to #i into a new db // Copy old chain up to #i into a new db
genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme) genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme)
if err != nil { if err != nil {
@ -125,27 +125,15 @@ func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, compara
} }
} }
// Sanity check that the forked chain can be imported into the original // Sanity check that the forked chain can be imported into the original
var tdPre, tdPost *big.Int
if full { if full {
cur := blockchain.CurrentBlock()
tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
if err := testBlockChainImport(blockChainB, blockchain); err != nil { if err := testBlockChainImport(blockChainB, blockchain); err != nil {
t.Fatalf("failed to import forked block chain: %v", err) t.Fatalf("failed to import forked block chain: %v", err)
} }
last := blockChainB[len(blockChainB)-1]
tdPost = blockchain.GetTd(last.Hash(), last.NumberU64())
} else { } else {
cur := blockchain.CurrentHeader()
tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64())
if err := testHeaderChainImport(headerChainB, blockchain); err != nil { if err := testHeaderChainImport(headerChainB, blockchain); err != nil {
t.Fatalf("failed to import forked header chain: %v", err) t.Fatalf("failed to import forked header chain: %v", err)
} }
last := headerChainB[len(headerChainB)-1]
tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64())
} }
// Compare the total difficulties of the chains
comparator(tdPre, tdPost)
} }
// testBlockChainImport tries to process a chain of blocks, writing them into // testBlockChainImport tries to process a chain of blocks, writing them into
@ -179,7 +167,6 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
} }
blockchain.chainmu.MustLock() blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
rawdb.WriteBlock(blockchain.db, block) rawdb.WriteBlock(blockchain.db, block)
statedb.Commit(block.NumberU64(), false, false) statedb.Commit(block.NumberU64(), false, false)
blockchain.chainmu.Unlock() blockchain.chainmu.Unlock()
@ -197,7 +184,6 @@ func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error
} }
// Manually insert the header into the database, but don't reorganise (allows subsequent testing) // Manually insert the header into the database, but don't reorganise (allows subsequent testing)
blockchain.chainmu.MustLock() blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
rawdb.WriteHeader(blockchain.db, header) rawdb.WriteHeader(blockchain.db, header)
blockchain.chainmu.Unlock() blockchain.chainmu.Unlock()
} }
@ -294,17 +280,11 @@ func testExtendCanonical(t *testing.T, full bool, scheme string) {
} }
defer processor.Stop() defer processor.Stop()
// Define the difficulty comparator
better := func(td1, td2 *big.Int) {
if td2.Cmp(td1) <= 0 {
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1)
}
}
// Start fork from current height // Start fork from current height
testFork(t, processor, length, 1, full, better, scheme) testFork(t, processor, length, 1, full, scheme)
testFork(t, processor, length, 2, full, better, scheme) testFork(t, processor, length, 2, full, scheme)
testFork(t, processor, length, 5, full, better, scheme) testFork(t, processor, length, 5, full, scheme)
testFork(t, processor, length, 10, full, better, scheme) testFork(t, processor, length, 10, full, scheme)
} }
// Tests that given a starting canonical chain of a given size, it can be extended // Tests that given a starting canonical chain of a given size, it can be extended
@ -353,19 +333,13 @@ func testShorterFork(t *testing.T, full bool, scheme string) {
} }
defer processor.Stop() defer processor.Stop()
// Define the difficulty comparator
worse := func(td1, td2 *big.Int) {
if td2.Cmp(td1) >= 0 {
t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1)
}
}
// Sum of numbers must be less than `length` for this to be a shorter fork // Sum of numbers must be less than `length` for this to be a shorter fork
testFork(t, processor, 0, 3, full, worse, scheme) testFork(t, processor, 0, 3, full, scheme)
testFork(t, processor, 0, 7, full, worse, scheme) testFork(t, processor, 0, 7, full, scheme)
testFork(t, processor, 1, 1, full, worse, scheme) testFork(t, processor, 1, 1, full, scheme)
testFork(t, processor, 1, 7, full, worse, scheme) testFork(t, processor, 1, 7, full, scheme)
testFork(t, processor, 5, 3, full, worse, scheme) testFork(t, processor, 5, 3, full, scheme)
testFork(t, processor, 5, 4, full, worse, scheme) testFork(t, processor, 5, 4, full, scheme)
} }
// Tests that given a starting canonical chain of a given size, creating shorter // Tests that given a starting canonical chain of a given size, creating shorter
@ -476,19 +450,13 @@ func testEqualFork(t *testing.T, full bool, scheme string) {
} }
defer processor.Stop() defer processor.Stop()
// Define the difficulty comparator
equal := func(td1, td2 *big.Int) {
if td2.Cmp(td1) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1)
}
}
// Sum of numbers must be equal to `length` for this to be an equal fork // Sum of numbers must be equal to `length` for this to be an equal fork
testFork(t, processor, 0, 10, full, equal, scheme) testFork(t, processor, 0, 10, full, scheme)
testFork(t, processor, 1, 9, full, equal, scheme) testFork(t, processor, 1, 9, full, scheme)
testFork(t, processor, 2, 8, full, equal, scheme) testFork(t, processor, 2, 8, full, scheme)
testFork(t, processor, 5, 5, full, equal, scheme) testFork(t, processor, 5, 5, full, scheme)
testFork(t, processor, 6, 4, full, equal, scheme) testFork(t, processor, 6, 4, full, scheme)
testFork(t, processor, 9, 1, full, equal, scheme) testFork(t, processor, 9, 1, full, scheme)
} }
// Tests that given a starting canonical chain of a given size, creating equal // Tests that given a starting canonical chain of a given size, creating equal
@ -647,19 +615,6 @@ func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme
} }
} }
} }
// Make sure the chain total difficulty is the correct one
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
if full {
cur := blockchain.CurrentBlock()
if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
} else {
cur := blockchain.CurrentHeader()
if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
}
} }
// Tests chain insertions in the face of one entity containing an invalid nonce. // Tests chain insertions in the face of one entity containing an invalid nonce.
@ -809,12 +764,6 @@ func testFastVsFullChains(t *testing.T, scheme string) {
for i := 0; i < len(blocks); i++ { for i := 0; i < len(blocks); i++ {
num, hash, time := blocks[i].NumberU64(), blocks[i].Hash(), blocks[i].Time() num, hash, time := blocks[i].NumberU64(), blocks[i].Hash(), blocks[i].Time()
if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 {
t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd)
}
if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 {
t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd)
}
if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() { if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() {
t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader) t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader)
} }
@ -4038,7 +3987,7 @@ func TestEIP3651(t *testing.T) {
var ( var (
aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa")
bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb")
engine = beacon.NewFaker() engine = beacon.New(ethash.NewFaker())
// A sender who makes transactions, has some funds // A sender who makes transactions, has some funds
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@ -4154,7 +4103,7 @@ func TestPragueRequests(t *testing.T) {
addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr1 = crypto.PubkeyToAddress(key1.PublicKey)
config = *params.MergedTestChainConfig config = *params.MergedTestChainConfig
signer = types.LatestSigner(&config) signer = types.LatestSigner(&config)
engine = beacon.NewFaker() engine = beacon.New(ethash.NewFaker())
) )
gspec := &Genesis{ gspec := &Genesis{
Config: &config, Config: &config,
@ -4232,7 +4181,7 @@ func TestEIP7702(t *testing.T) {
var ( var (
config = *params.MergedTestChainConfig config = *params.MergedTestChainConfig
signer = types.LatestSigner(&config) signer = types.LatestSigner(&config)
engine = beacon.NewFaker() engine = beacon.New(ethash.NewFaker())
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr1 = crypto.PubkeyToAddress(key1.PublicKey)

@ -726,7 +726,3 @@ func (cm *chainMaker) GetHeader(hash common.Hash, number uint64) *types.Header {
func (cm *chainMaker) GetBlock(hash common.Hash, number uint64) *types.Block { func (cm *chainMaker) GetBlock(hash common.Hash, number uint64) *types.Block {
return cm.blockByNumber(number) return cm.blockByNumber(number)
} }
func (cm *chainMaker) GetTd(hash common.Hash, number uint64) *big.Int {
return nil // not supported
}

@ -80,8 +80,9 @@ func TestGeneratePOSChain(t *testing.T) {
Code: common.Hex2Bytes("600154600354"), Code: common.Hex2Bytes("600154600354"),
} }
genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults)) genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults))
engine := beacon.New(ethash.NewFaker())
genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { genchain, genreceipts := GenerateChain(gspec.Config, genesis, engine, gendb, 4, func(i int, gen *BlockGen) {
gen.SetParentBeaconRoot(common.Hash{byte(i + 1)}) gen.SetParentBeaconRoot(common.Hash{byte(i + 1)})
// Add value transfer tx. // Add value transfer tx.
@ -122,7 +123,7 @@ func TestGeneratePOSChain(t *testing.T) {
}) })
// Import the chain. This runs all block validation rules. // Import the chain. This runs all block validation rules.
blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil) blockchain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil)
defer blockchain.Stop() defer blockchain.Stop()
if i, err := blockchain.InsertChain(genchain); err != nil { if i, err := blockchain.InsertChain(genchain); err != nil {

@ -530,7 +530,6 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *triedb.Database) (*types.Blo
} }
batch := db.NewBatch() batch := db.NewBatch()
rawdb.WriteGenesisStateSpec(batch, block.Hash(), blob) rawdb.WriteGenesisStateSpec(batch, block.Hash(), blob)
rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(batch, block) rawdb.WriteBlock(batch, block)
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), nil) rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), nil)
rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64()) rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())

@ -191,7 +191,7 @@ func TestGenesisHashes(t *testing.T) {
} }
} }
func TestGenesis_Commit(t *testing.T) { func TestGenesisCommit(t *testing.T) {
genesis := &Genesis{ genesis := &Genesis{
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
Config: params.TestChainConfig, Config: params.TestChainConfig,
@ -209,13 +209,6 @@ func TestGenesis_Commit(t *testing.T) {
if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 { if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 {
t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty()) t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty())
} }
// Expect the stored total difficulty to be the difficulty of the genesis block.
stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64())
if stored.Cmp(genesisBlock.Difficulty()) != 0 {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
}
} }
func TestReadWriteGenesisAlloc(t *testing.T) { func TestReadWriteGenesisAlloc(t *testing.T) {

@ -19,7 +19,6 @@ package core
import ( import (
"errors" "errors"
"fmt" "fmt"
"math/big"
"sync/atomic" "sync/atomic"
"time" "time"
@ -36,7 +35,6 @@ import (
const ( const (
headerCacheLimit = 512 headerCacheLimit = 512
tdCacheLimit = 1024
numberCacheLimit = 2048 numberCacheLimit = 2048
) )
@ -65,8 +63,7 @@ type HeaderChain struct {
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
headerCache *lru.Cache[common.Hash, *types.Header] headerCache *lru.Cache[common.Hash, *types.Header]
tdCache *lru.Cache[common.Hash, *big.Int] // most recent total difficulties numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers
numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers
procInterrupt func() bool procInterrupt func() bool
engine consensus.Engine engine consensus.Engine
@ -79,7 +76,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
config: config, config: config,
chainDb: chainDb, chainDb: chainDb,
headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit),
tdCache: lru.NewCache[common.Hash, *big.Int](tdCacheLimit),
numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit),
procInterrupt: procInterrupt, procInterrupt: procInterrupt,
engine: engine, engine: engine,
@ -197,14 +193,12 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
if len(headers) == 0 { if len(headers) == 0 {
return 0, nil return 0, nil
} }
ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1) if !hc.HasHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) {
if ptd == nil {
return 0, consensus.ErrUnknownAncestor return 0, consensus.ErrUnknownAncestor
} }
var ( var (
newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain
inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain parentKnown = true // Set to true to force hc.HasHeader check the first iteration
parentKnown = true // Set to true to force hc.HasHeader check the first iteration
batch = hc.chainDb.NewBatch() batch = hc.chainDb.NewBatch()
) )
for i, header := range headers { for i, header := range headers {
@ -218,16 +212,11 @@ func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
hash = header.Hash() hash = header.Hash()
} }
number := header.Number.Uint64() number := header.Number.Uint64()
newTD.Add(newTD, header.Difficulty)
// If the parent was not present, store it // If the parent was not present, store it
// If the header is already known, skip it, otherwise store // If the header is already known, skip it, otherwise store
alreadyKnown := parentKnown && hc.HasHeader(hash, number) alreadyKnown := parentKnown && hc.HasHeader(hash, number)
if !alreadyKnown { if !alreadyKnown {
// Irrelevant of the canonical status, write the TD and header to the database.
rawdb.WriteTd(batch, hash, number, newTD)
hc.tdCache.Add(hash, new(big.Int).Set(newTD))
rawdb.WriteHeader(batch, header) rawdb.WriteHeader(batch, header)
inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash}) inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
hc.headerCache.Add(hash, header) hc.headerCache.Add(hash, header)
@ -392,22 +381,6 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma
return hash, number return hash, number
} }
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
// Short circuit if the td's already in the cache, retrieve otherwise
if cached, ok := hc.tdCache.Get(hash); ok {
return cached
}
td := rawdb.ReadTd(hc.chainDb, hash, number)
if td == nil {
return nil
}
// Cache the found body for next time and return
hc.tdCache.Add(hash, td)
return td
}
// GetHeader retrieves a block header from the database by hash and number, // GetHeader retrieves a block header from the database by hash and number,
// caching it if found. // caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
@ -620,7 +593,6 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
delFn(batch, hash, num) delFn(batch, hash, num)
} }
rawdb.DeleteHeader(batch, hash, num) rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num)
} }
rawdb.DeleteCanonicalHash(batch, num) rawdb.DeleteCanonicalHash(batch, num)
} }
@ -631,7 +603,6 @@ func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn Updat
} }
// Clear out any stale content from the caches // Clear out any stale content from the caches
hc.headerCache.Purge() hc.headerCache.Purge()
hc.tdCache.Purge()
hc.numberCache.Purge() hc.numberCache.Purge()
} }

@ -39,10 +39,6 @@ func verifyUnbrokenCanonchain(hc *HeaderChain) error {
return fmt.Errorf("Canon hash chain broken, block %d got %x, expected %x", return fmt.Errorf("Canon hash chain broken, block %d got %x, expected %x",
h.Number, canonHash[:8], exp[:8]) h.Number, canonHash[:8], exp[:8])
} }
// Verify that we have the TD
if td := rawdb.ReadTd(hc.chainDb, canonHash, h.Number.Uint64()); td == nil {
return fmt.Errorf("Canon TD missing at block %d", h.Number)
}
if h.Number.Uint64() == 0 { if h.Number.Uint64() == 0 {
break break
} }

@ -508,54 +508,6 @@ func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
} }
} }
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
// Check if the data is in ancients
if isCanon(reader, number, hash) {
data, _ = reader.Ancient(ChainFreezerDifficultyTable, number)
return nil
}
// If not, try reading from leveldb
data, _ = db.Get(headerTDKey(number, hash))
return nil
})
return data
}
// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
data := ReadTdRLP(db, hash, number)
if len(data) == 0 {
return nil
}
td := new(big.Int)
if err := rlp.DecodeBytes(data, td); err != nil {
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
return nil
}
return td
}
// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
data, err := rlp.EncodeToBytes(td)
if err != nil {
log.Crit("Failed to RLP encode block total difficulty", "err", err)
}
if err := db.Put(headerTDKey(number, hash), data); err != nil {
log.Crit("Failed to store block total difficulty", "err", err)
}
}
// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
if err := db.Delete(headerTDKey(number, hash)); err != nil {
log.Crit("Failed to delete block total difficulty", "err", err)
}
}
// HasReceipts verifies the existence of all the transaction receipts belonging // HasReceipts verifies the existence of all the transaction receipts belonging
// to a block. // to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool { func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
@ -741,11 +693,9 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
} }
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size. // WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) { func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts) (int64, error) {
var ( var stReceipts []*types.ReceiptForStorage
tdSum = new(big.Int).Set(td)
stReceipts []*types.ReceiptForStorage
)
return db.ModifyAncients(func(op ethdb.AncientWriteOp) error { return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
for i, block := range blocks { for i, block := range blocks {
// Convert receipts to storage format and sum up total difficulty. // Convert receipts to storage format and sum up total difficulty.
@ -754,10 +704,7 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt)) stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
} }
header := block.Header() header := block.Header()
if i > 0 { if err := writeAncientBlock(op, block, header, stReceipts); err != nil {
tdSum.Add(tdSum, header.Difficulty)
}
if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
return err return err
} }
} }
@ -765,7 +712,7 @@ func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts
}) })
} }
func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error { func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage) error {
num := block.NumberU64() num := block.NumberU64()
if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil { if err := op.AppendRaw(ChainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
return fmt.Errorf("can't add block %d hash: %v", num, err) return fmt.Errorf("can't add block %d hash: %v", num, err)
@ -779,9 +726,6 @@ func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *type
if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil { if err := op.Append(ChainFreezerReceiptTable, num, receipts); err != nil {
return fmt.Errorf("can't append block %d receipts: %v", num, err) return fmt.Errorf("can't append block %d receipts: %v", num, err)
} }
if err := op.Append(ChainFreezerDifficultyTable, num, td); err != nil {
return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
}
return nil return nil
} }
@ -790,7 +734,6 @@ func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
DeleteReceipts(db, hash, number) DeleteReceipts(db, hash, number)
DeleteHeader(db, hash, number) DeleteHeader(db, hash, number)
DeleteBody(db, hash, number) DeleteBody(db, hash, number)
DeleteTd(db, hash, number)
} }
// DeleteBlockWithoutNumber removes all block data associated with a hash, except // DeleteBlockWithoutNumber removes all block data associated with a hash, except
@ -799,7 +742,6 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
DeleteReceipts(db, hash, number) DeleteReceipts(db, hash, number)
deleteHeaderWithoutNumber(db, hash, number) deleteHeaderWithoutNumber(db, hash, number)
DeleteBody(db, hash, number) DeleteBody(db, hash, number)
DeleteTd(db, hash, number)
} }
const badBlockToKeep = 10 const badBlockToKeep = 10

@ -258,29 +258,6 @@ func TestBadBlockStorage(t *testing.T) {
} }
} }
// Tests block total difficulty storage and retrieval operations.
func TestTdStorage(t *testing.T) {
db := NewMemoryDatabase()
// Create a test TD to move around the database and make sure it's really new
hash, td := common.Hash{}, big.NewInt(314)
if entry := ReadTd(db, hash, 0); entry != nil {
t.Fatalf("Non existent TD returned: %v", entry)
}
// Write and verify the TD in the database
WriteTd(db, hash, 0, td)
if entry := ReadTd(db, hash, 0); entry == nil {
t.Fatalf("Stored TD not found")
} else if entry.Cmp(td) != 0 {
t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td)
}
// Delete the TD and verify the execution
DeleteTd(db, hash, 0)
if entry := ReadTd(db, hash, 0); entry != nil {
t.Fatalf("Deleted TD returned: %v", entry)
}
}
// Tests that canonical numbers can be mapped to hashes and retrieved. // Tests that canonical numbers can be mapped to hashes and retrieved.
func TestCanonicalMappingStorage(t *testing.T) { func TestCanonicalMappingStorage(t *testing.T) {
db := NewMemoryDatabase() db := NewMemoryDatabase()
@ -460,12 +437,9 @@ func TestAncientStorage(t *testing.T) {
if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 { if blob := ReadReceiptsRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent receipts returned") t.Fatalf("non existent receipts returned")
} }
if blob := ReadTdRLP(db, hash, number); len(blob) > 0 {
t.Fatalf("non existent td returned")
}
// Write and verify the header in the database // Write and verify the header in the database
WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil}, big.NewInt(100)) WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{nil})
if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 { if blob := ReadHeaderRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no header returned") t.Fatalf("no header returned")
@ -476,9 +450,6 @@ func TestAncientStorage(t *testing.T) {
if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 { if blob := ReadReceiptsRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no receipts returned") t.Fatalf("no receipts returned")
} }
if blob := ReadTdRLP(db, hash, number); len(blob) == 0 {
t.Fatalf("no td returned")
}
// Use a fake hash for data retrieval, nothing should be returned. // Use a fake hash for data retrieval, nothing should be returned.
fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03}) fakeHash := common.BytesToHash([]byte{0x01, 0x02, 0x03})
@ -491,9 +462,6 @@ func TestAncientStorage(t *testing.T) {
if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 { if blob := ReadReceiptsRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid receipts returned") t.Fatalf("invalid receipts returned")
} }
if blob := ReadTdRLP(db, fakeHash, number); len(blob) != 0 {
t.Fatalf("invalid td returned")
}
} }
func TestCanonicalHashIteration(t *testing.T) { func TestCanonicalHashIteration(t *testing.T) {
@ -518,7 +486,6 @@ func TestCanonicalHashIteration(t *testing.T) {
// Fill database with testing data. // Fill database with testing data.
for i := uint64(1); i <= 8; i++ { for i := uint64(1); i <= 8; i++ {
WriteCanonicalHash(db, common.Hash{}, i) WriteCanonicalHash(db, common.Hash{}, i)
WriteTd(db, common.Hash{}, i, big.NewInt(10)) // Write some interferential data
} }
for i, c := range cases { for i, c := range cases {
numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit) numbers, _ := ReadAllCanonicalHashes(db, c.from, c.to, c.limit)
@ -591,7 +558,6 @@ func BenchmarkWriteAncientBlocks(b *testing.B) {
// The benchmark loop writes batches of blocks, but note that the total block count is // The benchmark loop writes batches of blocks, but note that the total block count is
// b.N. This means the resulting ns/op measurement is the time it takes to write a // b.N. This means the resulting ns/op measurement is the time it takes to write a
// single block and its associated data. // single block and its associated data.
var td = big.NewInt(55)
var totalSize int64 var totalSize int64
for i := 0; i < b.N; i += batchSize { for i := 0; i < b.N; i += batchSize {
length := batchSize length := batchSize
@ -601,7 +567,7 @@ func BenchmarkWriteAncientBlocks(b *testing.B) {
blocks := allBlocks[i : i+length] blocks := allBlocks[i : i+length]
receipts := batchReceipts[:length] receipts := batchReceipts[:length]
writeSize, err := WriteAncientBlocks(db, blocks, receipts, td) writeSize, err := WriteAncientBlocks(db, blocks, receipts)
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -900,7 +866,7 @@ func TestHeadersRLPStorage(t *testing.T) {
} }
var receipts []types.Receipts = make([]types.Receipts, 100) var receipts []types.Receipts = make([]types.Receipts, 100)
// Write first half to ancients // Write first half to ancients
WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100)) WriteAncientBlocks(db, chain[:50], receipts[:50])
// Write second half to db // Write second half to db
for i := 50; i < 100; i++ { for i := 50; i < 100; i++ {
WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64()) WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())

@ -35,19 +35,15 @@ const (
// ChainFreezerReceiptTable indicates the name of the freezer receipts table. // ChainFreezerReceiptTable indicates the name of the freezer receipts table.
ChainFreezerReceiptTable = "receipts" ChainFreezerReceiptTable = "receipts"
// ChainFreezerDifficultyTable indicates the name of the freezer total difficulty table.
ChainFreezerDifficultyTable = "diffs"
) )
// chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables. // chainFreezerNoSnappy configures whether compression is disabled for the ancient-tables.
// Hashes and difficulties don't compress well. // Hashes and difficulties don't compress well.
var chainFreezerNoSnappy = map[string]bool{ var chainFreezerNoSnappy = map[string]bool{
ChainFreezerHeaderTable: false, ChainFreezerHeaderTable: false,
ChainFreezerHashTable: true, ChainFreezerHashTable: true,
ChainFreezerBodiesTable: false, ChainFreezerBodiesTable: false,
ChainFreezerReceiptTable: false, ChainFreezerReceiptTable: false,
ChainFreezerDifficultyTable: true,
} }
const ( const (

@ -315,11 +315,6 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
if len(receipts) == 0 { if len(receipts) == 0 {
return fmt.Errorf("block receipts missing, can't freeze block %d", number) return fmt.Errorf("block receipts missing, can't freeze block %d", number)
} }
td := ReadTdRLP(nfdb, hash, number)
if len(td) == 0 {
return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
}
// Write to the batch. // Write to the batch.
if err := op.AppendRaw(ChainFreezerHashTable, number, hash[:]); err != nil { if err := op.AppendRaw(ChainFreezerHashTable, number, hash[:]); err != nil {
return fmt.Errorf("can't write hash to Freezer: %v", err) return fmt.Errorf("can't write hash to Freezer: %v", err)
@ -333,9 +328,6 @@ func (f *chainFreezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hash
if err := op.AppendRaw(ChainFreezerReceiptTable, number, receipts); err != nil { if err := op.AppendRaw(ChainFreezerReceiptTable, number, receipts); err != nil {
return fmt.Errorf("can't write receipts to Freezer: %v", err) return fmt.Errorf("can't write receipts to Freezer: %v", err)
} }
if err := op.AppendRaw(ChainFreezerDifficultyTable, number, td); err != nil {
return fmt.Errorf("can't write td to Freezer: %v", err)
}
hashes = append(hashes, hash) hashes = append(hashes, hash)
} }
return nil return nil

@ -500,7 +500,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Headers", headers.Size(), headers.Count()}, {"Key-Value store", "Headers", headers.Size(), headers.Count()},
{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()}, {"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()}, {"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
{"Key-Value store", "Difficulties", tds.Size(), tds.Count()}, {"Key-Value store", "Difficulties (deprecated)", tds.Size(), tds.Count()},
{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()}, {"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()}, {"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()}, {"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},

@ -98,7 +98,7 @@ var (
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td (deprecated)
headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash headerHashSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + headerHashSuffix -> hash
headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian) headerNumberPrefix = []byte("H") // headerNumberPrefix + hash -> num (uint64 big endian)
@ -174,11 +174,6 @@ func headerKey(number uint64, hash common.Hash) []byte {
return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
} }
// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix
func headerTDKey(number uint64, hash common.Hash) []byte {
	key := headerKey(number, hash)
	return append(key, headerTDSuffix...)
}
// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix // headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix
func headerHashKey(number uint64) []byte { func headerHashKey(number uint64) []byte {
return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...) return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)

@ -63,7 +63,6 @@ type VMContext struct {
// It contains the block as well as consensus related information. // It contains the block as well as consensus related information.
type BlockEvent struct { type BlockEvent struct {
Block *types.Block Block *types.Block
TD *big.Int
Finalized *types.Header Finalized *types.Header
Safe *types.Header Safe *types.Header
} }

@ -211,7 +211,7 @@ func TestTxIndexer(t *testing.T) {
} }
for _, c := range cases { for _, c := range cases {
db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false) db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), "", "", false)
rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...))
// Index the initial blocks from ancient store // Index the initial blocks from ancient store
indexer := &txIndexer{ indexer := &txIndexer{

@ -242,13 +242,6 @@ func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number ui
return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil
} }
// GetTd returns the total difficulty stored for the block with the given
// hash, or nil if no header is known locally for that hash.
func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
	header := b.eth.blockchain.GetHeaderByHash(hash)
	if header == nil {
		return nil
	}
	return b.eth.blockchain.GetTd(hash, header.Number.Uint64())
}
func (b *EthAPIBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM { func (b *EthAPIBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM {
if vmConfig == nil { if vmConfig == nil {
vmConfig = b.eth.blockchain.GetVMConfig() vmConfig = b.eth.blockchain.GetVMConfig()

@ -361,21 +361,12 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
} }
// Block is known locally, just sanity check that the beacon client does not // Block is known locally, just sanity check that the beacon client does not
// attempt to push us back to before the merge. // attempt to push us back to before the merge.
if block.Difficulty().BitLen() > 0 || block.NumberU64() == 0 { if block.Difficulty().BitLen() > 0 && block.NumberU64() > 0 {
var ( ph := api.eth.BlockChain().GetHeader(block.ParentHash(), block.NumberU64()-1)
td = api.eth.BlockChain().GetTd(update.HeadBlockHash, block.NumberU64()) if ph == nil {
ptd = api.eth.BlockChain().GetTd(block.ParentHash(), block.NumberU64()-1) return engine.STATUS_INVALID, errors.New("parent unavailable for difficulty check")
ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
)
if td == nil || (block.NumberU64() > 0 && ptd == nil) {
log.Error("TDs unavailable for TTD check", "number", block.NumberU64(), "hash", update.HeadBlockHash, "td", td, "parent", block.ParentHash(), "ptd", ptd)
return engine.STATUS_INVALID, errors.New("TDs unavailable for TDD check")
} }
if td.Cmp(ttd) < 0 { if ph.Difficulty.Sign() == 0 && block.Difficulty().Sign() > 0 {
log.Error("Refusing beacon update to pre-merge", "number", block.NumberU64(), "hash", update.HeadBlockHash, "diff", block.Difficulty(), "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
return engine.ForkChoiceResponse{PayloadStatus: engine.INVALID_TERMINAL_BLOCK, PayloadID: nil}, nil
}
if block.NumberU64() > 0 && ptd.Cmp(ttd) >= 0 {
log.Error("Parent block is already post-ttd", "number", block.NumberU64(), "hash", update.HeadBlockHash, "diff", block.Difficulty(), "age", common.PrettyAge(time.Unix(int64(block.Time()), 0))) log.Error("Parent block is already post-ttd", "number", block.NumberU64(), "hash", update.HeadBlockHash, "diff", block.Difficulty(), "age", common.PrettyAge(time.Unix(int64(block.Time()), 0)))
return engine.ForkChoiceResponse{PayloadStatus: engine.INVALID_TERMINAL_BLOCK, PayloadID: nil}, nil return engine.ForkChoiceResponse{PayloadStatus: engine.INVALID_TERMINAL_BLOCK, PayloadID: nil}, nil
} }
@ -901,21 +892,6 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
if parent == nil { if parent == nil {
return api.delayPayloadImport(block), nil return api.delayPayloadImport(block), nil
} }
// We have an existing parent, do some sanity checks to avoid the beacon client
// triggering too early
var (
ptd = api.eth.BlockChain().GetTd(parent.Hash(), parent.NumberU64())
ttd = api.eth.BlockChain().Config().TerminalTotalDifficulty
gptd = api.eth.BlockChain().GetTd(parent.ParentHash(), parent.NumberU64()-1)
)
if ptd.Cmp(ttd) < 0 {
log.Warn("Ignoring pre-merge payload", "number", params.Number, "hash", params.BlockHash, "td", ptd, "ttd", ttd)
return engine.INVALID_TERMINAL_BLOCK, nil
}
if parent.Difficulty().BitLen() > 0 && gptd != nil && gptd.Cmp(ttd) >= 0 {
log.Error("Ignoring pre-merge parent block", "number", params.Number, "hash", params.BlockHash, "td", ptd, "ttd", ttd)
return engine.INVALID_TERMINAL_BLOCK, nil
}
if block.Time() <= parent.Time() { if block.Time() <= parent.Time() {
log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time()) log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time())
return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil

@ -32,8 +32,7 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/beacon"
beaconConsensus "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
@ -62,10 +61,11 @@ var (
func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) { func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) {
config := *params.AllEthashProtocolChanges config := *params.AllEthashProtocolChanges
engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker())) engine := beacon.New(ethash.NewFaker())
if merged { if merged {
config.TerminalTotalDifficulty = common.Big0 config.TerminalTotalDifficulty = common.Big0
engine = beaconConsensus.NewFaker() } else {
engine.TestingTTDBlock(uint64(n))
} }
genesis := &core.Genesis{ genesis := &core.Genesis{
Config: &config, Config: &config,
@ -101,7 +101,6 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block) {
} }
config.TerminalTotalDifficulty = totalDifficulty config.TerminalTotalDifficulty = totalDifficulty
} }
return genesis, blocks return genesis, blocks
} }
@ -164,24 +163,6 @@ func TestEth2AssembleBlockWithAnotherBlocksTxs(t *testing.T) {
} }
} }
// TestSetHeadBeforeTotalDifficulty checks that a forkchoice update pointing
// the head at a block that predates the terminal total difficulty is answered
// with the INVALID_TERMINAL_BLOCK status rather than accepted.
func TestSetHeadBeforeTotalDifficulty(t *testing.T) {
	genesis, blocks := generateMergeChain(10, false)
	n, ethservice := startEthService(t, genesis, blocks)
	defer n.Close()

	api := NewConsensusAPI(ethservice)

	// Target a pre-merge block as head; safe/finalized are deliberately empty.
	fcState := engine.ForkchoiceStateV1{
		HeadBlockHash:      blocks[5].Hash(),
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	resp, err := api.ForkchoiceUpdatedV1(fcState, nil)
	switch {
	case err != nil:
		t.Errorf("fork choice updated should not error: %v", err)
	case resp.PayloadStatus.Status != engine.INVALID_TERMINAL_BLOCK.Status:
		t.Errorf("fork choice updated before total terminal difficulty should be INVALID")
	}
}
func TestEth2PrepareAndGetPayload(t *testing.T) { func TestEth2PrepareAndGetPayload(t *testing.T) {
genesis, blocks := generateMergeChain(10, false) genesis, blocks := generateMergeChain(10, false)
// We need to properly set the terminal total difficulty // We need to properly set the terminal total difficulty
@ -902,70 +883,6 @@ func TestInvalidBloom(t *testing.T) {
} }
} }
// TestNewPayloadOnInvalidTerminalBlock checks that both forkchoiceUpdated and
// newPayload reject work anchored on a parent that is already past the
// terminal total difficulty, returning INVALID_TERMINAL_BLOCK in both cases.
//
// Fix: the final failure report previously printed the earlier FCU response
// (resp.PayloadStatus.Status) and called it a "forkchoice" error; it now
// reports the actual NewPayload response status.
func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
	genesis, preMergeBlocks := generateMergeChain(100, false)
	n, ethservice := startEthService(t, genesis, preMergeBlocks)
	defer n.Close()
	api := NewConsensusAPI(ethservice)

	// Test parent already post TTD in FCU
	parent := preMergeBlocks[len(preMergeBlocks)-2]
	fcState := engine.ForkchoiceStateV1{
		HeadBlockHash:      parent.Hash(),
		SafeBlockHash:      common.Hash{},
		FinalizedBlockHash: common.Hash{},
	}
	resp, err := api.ForkchoiceUpdatedV1(fcState, nil)
	if err != nil {
		t.Fatalf("error sending forkchoice, err=%v", err)
	}
	if resp.PayloadStatus != engine.INVALID_TERMINAL_BLOCK {
		t.Fatalf("error sending invalid forkchoice, invalid status: %v", resp.PayloadStatus.Status)
	}
	// Test parent already post TTD in NewPayload
	args := &miner.BuildPayloadArgs{
		Parent:       parent.Hash(),
		Timestamp:    parent.Time() + 1,
		Random:       crypto.Keccak256Hash([]byte{byte(1)}),
		FeeRecipient: parent.Coinbase(),
	}
	payload, err := api.eth.Miner().BuildPayload(args, false)
	if err != nil {
		t.Fatalf("error preparing payload, err=%v", err)
	}
	data := *payload.Resolve().ExecutionPayload
	// We need to recompute the blockhash, since the miner computes a wrong (correct) blockhash
	txs, _ := decodeTransactions(data.Transactions)
	header := &types.Header{
		ParentHash:  data.ParentHash,
		UncleHash:   types.EmptyUncleHash,
		Coinbase:    data.FeeRecipient,
		Root:        data.StateRoot,
		TxHash:      types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
		ReceiptHash: data.ReceiptsRoot,
		Bloom:       types.BytesToBloom(data.LogsBloom),
		Difficulty:  common.Big0,
		Number:      new(big.Int).SetUint64(data.Number),
		GasLimit:    data.GasLimit,
		GasUsed:     data.GasUsed,
		Time:        data.Timestamp,
		BaseFee:     data.BaseFeePerGas,
		Extra:       data.ExtraData,
		MixDigest:   data.Random,
	}
	block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs})
	data.BlockHash = block.Hash()
	// Send the new payload
	resp2, err := api.NewPayloadV1(data)
	if err != nil {
		t.Fatalf("error sending NewPayload, err=%v", err)
	}
	if resp2 != engine.INVALID_TERMINAL_BLOCK {
		t.Fatalf("error sending invalid newpayload, invalid status: %v", resp2.Status)
	}
}
// TestSimultaneousNewBlock does several parallel inserts, both as // TestSimultaneousNewBlock does several parallel inserts, both as
// newPayLoad and forkchoiceUpdate. This is to test that the api behaves // newPayLoad and forkchoiceUpdate. This is to test that the api behaves
// well even of the caller is not being 'serial'. // well even of the caller is not being 'serial'.

@ -20,7 +20,6 @@ package downloader
import ( import (
"errors" "errors"
"fmt" "fmt"
"math/big"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -164,9 +163,6 @@ type BlockChain interface {
// CurrentHeader retrieves the head header from the local chain. // CurrentHeader retrieves the head header from the local chain.
CurrentHeader() *types.Header CurrentHeader() *types.Header
// GetTd returns the total difficulty of a local block.
GetTd(common.Hash, uint64) *big.Int
// InsertHeaderChain inserts a batch of headers into the local chain. // InsertHeaderChain inserts a batch of headers into the local chain.
InsertHeaderChain([]*types.Header) (int, error) InsertHeaderChain([]*types.Header) (int, error)

@ -126,13 +126,6 @@ type downloadTesterPeer struct {
chain *core.BlockChain chain *core.BlockChain
} }
// Head constructs a function to retrieve a peer's current head hash
// and total difficulty.
func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {
	current := dlp.chain.CurrentBlock()
	hash := current.Hash()
	return hash, dlp.chain.GetTd(hash, current.Number.Uint64())
}
func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header {
var headers = make([]*types.Header, len(rlpdata)) var headers = make([]*types.Header, len(rlpdata))
for i, data := range rlpdata { for i, data := range rlpdata {

@ -21,7 +21,6 @@ package downloader
import ( import (
"errors" "errors"
"math/big"
"sync" "sync"
"time" "time"
@ -57,7 +56,6 @@ type peerConnection struct {
// Peer encapsulates the methods required to synchronise with a remote full peer. // Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface { type Peer interface {
Head() (common.Hash, *big.Int)
RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByHash(common.Hash, int, int, bool, chan *eth.Response) (*eth.Request, error)
RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error) RequestHeadersByNumber(uint64, int, int, bool, chan *eth.Response) (*eth.Request, error)

@ -25,7 +25,6 @@ import (
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -148,7 +147,10 @@ func newTestBackend(t *testing.T, londonBlock *big.Int, cancunBlock *big.Int, pe
config.LondonBlock = londonBlock config.LondonBlock = londonBlock
config.ArrowGlacierBlock = londonBlock config.ArrowGlacierBlock = londonBlock
config.GrayGlacierBlock = londonBlock config.GrayGlacierBlock = londonBlock
var engine consensus.Engine = beacon.New(ethash.NewFaker())
engine := beacon.New(ethash.NewFaker())
engine.TestingTTDBlock(testHead + 1)
td := params.GenesisDifficulty.Uint64() td := params.GenesisDifficulty.Uint64()
if cancunBlock != nil { if cancunBlock != nil {

@ -253,10 +253,9 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {
head = h.chain.CurrentHeader() head = h.chain.CurrentHeader()
hash = head.Hash() hash = head.Hash()
number = head.Number.Uint64() number = head.Number.Uint64()
td = h.chain.GetTd(hash, number)
) )
forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time) forkID := forkid.NewID(h.chain.Config(), genesis, number, head.Time)
if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil { if err := peer.Handshake(h.networkID, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {
peer.Log().Debug("Ethereum handshake failed", "err", err) peer.Log().Debug("Ethereum handshake failed", "err", err)
return err return err
} }

@ -260,9 +260,8 @@ func testRecvTransactions(t *testing.T, protocol uint) {
var ( var (
genesis = handler.chain.Genesis() genesis = handler.chain.Genesis()
head = handler.chain.CurrentBlock() head = handler.chain.CurrentBlock()
td = handler.chain.GetTd(head.Hash(), head.Number.Uint64())
) )
if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { if err := src.Handshake(1, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake") t.Fatalf("failed to run protocol handshake")
} }
// Send the transaction to the sink and verify that it's added to the tx pool // Send the transaction to the sink and verify that it's added to the tx pool
@ -320,9 +319,8 @@ func testSendTransactions(t *testing.T, protocol uint) {
var ( var (
genesis = handler.chain.Genesis() genesis = handler.chain.Genesis()
head = handler.chain.CurrentBlock() head = handler.chain.CurrentBlock()
td = handler.chain.GetTd(head.Hash(), head.Number.Uint64())
) )
if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { if err := sink.Handshake(1, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {
t.Fatalf("failed to run protocol handshake") t.Fatalf("failed to run protocol handshake")
} }
// After the handshake completes, the source handler should stream the sink // After the handshake completes, the source handler should stream the sink

@ -18,7 +18,6 @@ package eth
import ( import (
"fmt" "fmt"
"math/big"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -120,11 +119,10 @@ func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2
// NodeInfo represents a short summary of the `eth` sub-protocol metadata // NodeInfo represents a short summary of the `eth` sub-protocol metadata
// known about the host peer. // known about the host peer.
type NodeInfo struct { type NodeInfo struct {
Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Holesky=17000) Network uint64 `json:"network"` // Ethereum network ID (1=Mainnet, Holesky=17000)
Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block
Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules
Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules Head common.Hash `json:"head"` // Hex hash of the host's best owned block
Head common.Hash `json:"head"` // Hex hash of the host's best owned block
} }
// nodeInfo retrieves some `eth` protocol metadata about the running host node. // nodeInfo retrieves some `eth` protocol metadata about the running host node.
@ -133,11 +131,10 @@ func nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo {
hash := head.Hash() hash := head.Hash()
return &NodeInfo{ return &NodeInfo{
Network: network, Network: network,
Difficulty: chain.GetTd(hash, head.Number.Uint64()), Genesis: chain.Genesis().Hash(),
Genesis: chain.Genesis().Hash(), Config: chain.Config(),
Config: chain.Config(), Head: hash,
Head: hash,
} }
} }

@ -25,7 +25,6 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -71,12 +70,13 @@ func newTestBackend(blocks int) *testBackend {
func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend { func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend {
var ( var (
// Create a database pre-initialize with a genesis block // Create a database pre-initialize with a genesis block
db = rawdb.NewMemoryDatabase() db = rawdb.NewMemoryDatabase()
config = params.TestChainConfig config = params.TestChainConfig
engine consensus.Engine = ethash.NewFaker() engine = beacon.New(ethash.NewFaker())
) )
if !shanghai {
if shanghai { engine.TestingTTDBlock(math.MaxUint64)
} else {
config = &params.ChainConfig{ config = &params.ChainConfig{
ChainID: big.NewInt(1), ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0), HomesteadBlock: big.NewInt(0),
@ -99,7 +99,6 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
TerminalTotalDifficulty: big.NewInt(0), TerminalTotalDifficulty: big.NewInt(0),
Ethash: new(params.EthashConfig), Ethash: new(params.EthashConfig),
} }
engine = beacon.NewFaker()
} }
gspec := &core.Genesis{ gspec := &core.Genesis{

@ -36,7 +36,7 @@ const (
// Handshake executes the eth protocol handshake, negotiating version number, // Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks. // network IDs, difficulties, head and genesis blocks.
func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error { func (p *Peer) Handshake(network uint64, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
// Send out own handshake in a new thread // Send out own handshake in a new thread
errc := make(chan error, 2) errc := make(chan error, 2)
@ -46,7 +46,7 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
errc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{ errc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{
ProtocolVersion: uint32(p.version), ProtocolVersion: uint32(p.version),
NetworkID: network, NetworkID: network,
TD: td, TD: new(big.Int), // unknown for post-merge tail=pruned networks
Head: head, Head: head,
Genesis: genesis, Genesis: genesis,
ForkID: forkID, ForkID: forkID,
@ -69,13 +69,6 @@ func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
return p2p.DiscReadTimeout return p2p.DiscReadTimeout
} }
} }
p.td, p.head = status.TD, status.Head
// TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
// larger, it will still fit within 100 bits
if tdlen := p.td.BitLen(); tdlen > 100 {
return fmt.Errorf("too large total difficulty: bitlen %d", tdlen)
}
return nil return nil
} }

@ -18,6 +18,7 @@ package eth
import ( import (
"errors" "errors"
"math/big"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -39,7 +40,6 @@ func testHandshake(t *testing.T, protocol uint) {
var ( var (
genesis = backend.chain.Genesis() genesis = backend.chain.Genesis()
head = backend.chain.CurrentBlock() head = backend.chain.CurrentBlock()
td = backend.chain.GetTd(head.Hash(), head.Number.Uint64())
forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time) forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time)
) )
tests := []struct { tests := []struct {
@ -52,19 +52,19 @@ func testHandshake(t *testing.T, protocol uint) {
want: errNoStatusMsg, want: errNoStatusMsg,
}, },
{ {
code: StatusMsg, data: StatusPacket{10, 1, td, head.Hash(), genesis.Hash(), forkID}, code: StatusMsg, data: StatusPacket{10, 1, new(big.Int), head.Hash(), genesis.Hash(), forkID},
want: errProtocolVersionMismatch, want: errProtocolVersionMismatch,
}, },
{ {
code: StatusMsg, data: StatusPacket{uint32(protocol), 999, td, head.Hash(), genesis.Hash(), forkID}, code: StatusMsg, data: StatusPacket{uint32(protocol), 999, new(big.Int), head.Hash(), genesis.Hash(), forkID},
want: errNetworkIDMismatch, want: errNetworkIDMismatch,
}, },
{ {
code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), common.Hash{3}, forkID}, code: StatusMsg, data: StatusPacket{uint32(protocol), 1, new(big.Int), head.Hash(), common.Hash{3}, forkID},
want: errGenesisMismatch, want: errGenesisMismatch,
}, },
{ {
code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}}, code: StatusMsg, data: StatusPacket{uint32(protocol), 1, new(big.Int), head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},
want: errForkIDRejected, want: errForkIDRejected,
}, },
} }
@ -80,7 +80,7 @@ func testHandshake(t *testing.T, protocol uint) {
// Send the junk test with one peer, check the handshake failure // Send the junk test with one peer, check the handshake failure
go p2p.Send(app, test.code, test.data) go p2p.Send(app, test.code, test.data)
err := peer.Handshake(1, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain)) err := peer.Handshake(1, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain))
if err == nil { if err == nil {
t.Errorf("test %d: protocol returned nil error, want %q", i, test.want) t.Errorf("test %d: protocol returned nil error, want %q", i, test.want)
} else if !errors.Is(err, test.want) { } else if !errors.Is(err, test.want) {

@ -17,9 +17,7 @@
package eth package eth
import ( import (
"math/big"
"math/rand" "math/rand"
"sync"
mapset "github.com/deckarep/golang-set/v2" mapset "github.com/deckarep/golang-set/v2"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -50,9 +48,6 @@ type Peer struct {
rw p2p.MsgReadWriter // Input/output streams for snap rw p2p.MsgReadWriter // Input/output streams for snap
version uint // Protocol version negotiated version uint // Protocol version negotiated
head common.Hash // Latest advertised head block hash
td *big.Int // Latest advertised head block total difficulty
txpool TxPool // Transaction pool used by the broadcasters for liveness checks txpool TxPool // Transaction pool used by the broadcasters for liveness checks
knownTxs *knownCache // Set of transaction hashes known to be known by this peer knownTxs *knownCache // Set of transaction hashes known to be known by this peer
txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
@ -63,7 +58,6 @@ type Peer struct {
resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them resDispatch chan *response // Dispatch channel to fulfil pending requests and untrack them
term chan struct{} // Termination channel to stop the broadcasters term chan struct{} // Termination channel to stop the broadcasters
lock sync.RWMutex // Mutex protecting the internal fields
} }
// NewPeer creates a wrapper for a network connection and negotiated protocol // NewPeer creates a wrapper for a network connection and negotiated protocol
@ -108,24 +102,6 @@ func (p *Peer) Version() uint {
return p.version return p.version
} }
// Head retrieves the current head hash and total difficulty of the peer.
func (p *Peer) Head() (hash common.Hash, td *big.Int) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.head[:])
return hash, new(big.Int).Set(p.td)
}
// SetHead updates the head hash and total difficulty of the peer.
func (p *Peer) SetHead(hash common.Hash, td *big.Int) {
p.lock.Lock()
defer p.lock.Unlock()
copy(p.head[:], hash[:])
p.td.Set(td)
}
// KnownTransaction returns whether peer is known to already have a transaction. // KnownTransaction returns whether peer is known to already have a transaction.
func (p *Peer) KnownTransaction(hash common.Hash) bool { func (p *Peer) KnownTransaction(hash common.Hash) bool {
return p.knownTxs.Contains(hash) return p.knownTxs.Contains(hash)

@ -1089,7 +1089,7 @@ func TestTraceChain(t *testing.T) {
func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { func newTestMergedBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
backend := &testBackend{ backend := &testBackend{
chainConfig: gspec.Config, chainConfig: gspec.Config,
engine: beacon.NewFaker(), engine: beacon.New(ethash.NewFaker()),
chaindb: rawdb.NewMemoryDatabase(), chaindb: rawdb.NewMemoryDatabase(),
} }
// Generate blocks for testing // Generate blocks for testing

@ -544,9 +544,8 @@ func TestSupplySelfdestructItselfAndRevert(t *testing.T) {
} }
func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockGen)) ([]supplyInfo, *core.BlockChain, error) { func testSupplyTracer(t *testing.T, genesis *core.Genesis, gen func(*core.BlockGen)) ([]supplyInfo, *core.BlockChain, error) {
var ( engine := beacon.New(ethash.NewFaker())
engine = beacon.New(ethash.NewFaker()) engine.TestingTTDBlock(1)
)
traceOutputPath := filepath.ToSlash(t.TempDir()) traceOutputPath := filepath.ToSlash(t.TempDir())
traceOutputFilename := path.Join(traceOutputPath, "supply.jsonl") traceOutputFilename := path.Join(traceOutputPath, "supply.jsonl")

@ -65,7 +65,6 @@ type backend interface {
SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription
CurrentHeader() *types.Header CurrentHeader() *types.Header
HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)
GetTd(ctx context.Context, hash common.Hash) *big.Int
Stats() (pending int, queued int) Stats() (pending int, queued int)
SyncProgress() ethereum.SyncProgress SyncProgress() ethereum.SyncProgress
} }
@ -628,7 +627,6 @@ func (s *Service) reportBlock(conn *connWrapper, header *types.Header) error {
func (s *Service) assembleBlockStats(header *types.Header) *blockStats { func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
// Gather the block infos from the local blockchain // Gather the block infos from the local blockchain
var ( var (
td *big.Int
txs []txStats txs []txStats
uncles []*types.Header uncles []*types.Header
) )
@ -644,8 +642,6 @@ func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
if block == nil { if block == nil {
return nil return nil
} }
td = fullBackend.GetTd(context.Background(), header.Hash())
txs = make([]txStats, len(block.Transactions())) txs = make([]txStats, len(block.Transactions()))
for i, tx := range block.Transactions() { for i, tx := range block.Transactions() {
txs[i].Hash = tx.Hash() txs[i].Hash = tx.Hash()
@ -656,7 +652,6 @@ func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
if header == nil { if header == nil {
header = s.backend.CurrentHeader() header = s.backend.CurrentHeader()
} }
td = s.backend.GetTd(context.Background(), header.Hash())
txs = []txStats{} txs = []txStats{}
} }
// Assemble and return the block stats // Assemble and return the block stats
@ -671,7 +666,7 @@ func (s *Service) assembleBlockStats(header *types.Header) *blockStats {
GasUsed: header.GasUsed, GasUsed: header.GasUsed,
GasLimit: header.GasLimit, GasLimit: header.GasLimit,
Diff: header.Difficulty.String(), Diff: header.Difficulty.String(),
TotalDiff: td.String(), TotalDiff: "0", // unknown post-merge with pruned chain tail
Txs: txs, Txs: txs,
TxHash: header.TxHash, TxHash: header.TxHash,
Root: header.Root, Root: header.Root,

@ -29,7 +29,6 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/beacon" "github.com/ethereum/go-ethereum/consensus/beacon"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
@ -457,9 +456,8 @@ func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Ge
RPCGasCap: 1000000, RPCGasCap: 1000000,
StateScheme: rawdb.HashScheme, StateScheme: rawdb.HashScheme,
} }
var engine consensus.Engine = ethash.NewFaker() var engine = beacon.New(ethash.NewFaker())
if shanghai { if shanghai {
engine = beacon.NewFaker()
gspec.Config.TerminalTotalDifficulty = common.Big0 gspec.Config.TerminalTotalDifficulty = common.Big0
// GenerateChain will increment timestamps by 10. // GenerateChain will increment timestamps by 10.
// Shanghai upgrade at block 1. // Shanghai upgrade at block 1.
@ -468,6 +466,7 @@ func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Ge
} else { } else {
// set an arbitrary large ttd as chains are required to be known to be merged // set an arbitrary large ttd as chains are required to be known to be merged
gspec.Config.TerminalTotalDifficulty = big.NewInt(math.MaxInt64) gspec.Config.TerminalTotalDifficulty = big.NewInt(math.MaxInt64)
engine.TestingTTDBlock(math.MaxUint64)
} }
ethBackend, err := eth.New(stack, ethConf) ethBackend, err := eth.New(stack, ethConf)
if err != nil { if err != nil {

@ -565,12 +565,6 @@ func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.R
receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config()) receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config())
return receipts, nil return receipts, nil
} }
func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {
if b.pending != nil && hash == b.pending.Hash() {
return nil
}
return big.NewInt(1)
}
func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) *vm.EVM { func (b testBackend) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) *vm.EVM {
if vmConfig == nil { if vmConfig == nil {
vmConfig = b.chain.GetVMConfig() vmConfig = b.chain.GetVMConfig()

@ -369,7 +369,6 @@ func (b *backendMock) GetReceipts(ctx context.Context, hash common.Hash) (types.
func (b *backendMock) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { func (b *backendMock) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) {
return nil, nil return nil, nil
} }
func (b *backendMock) GetTd(ctx context.Context, hash common.Hash) *big.Int { return nil }
func (b *backendMock) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM { func (b *backendMock) GetEVM(ctx context.Context, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockCtx *vm.BlockContext) *vm.EVM {
return nil return nil
} }

@ -60,6 +60,9 @@ func TestBlockchain(t *testing.T) {
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`) bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain.json`)
bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`) bt.skipLoad(`.*bcFrontierToHomestead/blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json`)
// With chain history removal, TDs become unavailable, this transition tests based on TTD are unrunnable
bt.skipLoad(`.*bcArrowGlacierToParis/powToPosBlockRejection.json`)
bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
execBlockTest(t, bt, test) execBlockTest(t, bt, test)
}) })

Loading…
Cancel
Save