Merge pull request #30522 from ethereum/master

Release Geth v1.14.10
Branch: release/1.14, tag: v1.14.10
Author: Péter Szilágyi (committed via GitHub)
Commit: 1015a42d90
  1. .github/CODEOWNERS (1 changed line)
  2. .travis.yml (3 changed lines)
  3. beacon/engine/gen_epe.go (6 changed lines)
  4. beacon/engine/types.go (41 changed lines)
  5. cmd/geth/main.go (1 changed line)
  6. cmd/utils/flags.go (10 changed lines)
  7. core/block_validator.go (24 changed lines)
  8. core/block_validator_test.go (2 changed lines)
  9. core/blockchain.go (120 changed lines)
  10. core/blockchain_repair_test.go (6 changed lines)
  11. core/blockchain_sethead_test.go (2 changed lines)
  12. core/blockchain_snapshot_test.go (20 changed lines)
  13. core/blockchain_test.go (16 changed lines)
  14. core/genesis.go (2 changed lines)
  15. core/rawdb/accessors_trie.go (4 changed lines)
  16. core/state/access_events.go (4 changed lines)
  17. core/state/statedb.go (2 changed lines)
  18. core/stateless.go (24 changed lines)
  19. core/stateless/database.go (7 changed lines)
  20. core/stateless/encoding.go (63 changed lines)
  21. core/stateless/gen_encoding_json.go (74 changed lines)
  22. core/stateless/witness.go (81 changed lines)
  23. core/tracing/hooks.go (1 changed line)
  24. core/txpool/blobpool/blobpool.go (4 changed lines)
  25. core/txpool/blobpool/blobpool_test.go (108 changed lines)
  26. core/types.go (5 changed lines)
  27. core/vm/interpreter.go (8 changed lines)
  28. core/vm/testdata/precompiles/blsG1MultiExp.json (268 changed lines)
  29. core/vm/testdata/precompiles/blsG2MultiExp.json (254 changed lines)
  30. eth/backend.go (1 changed line)
  31. eth/catalyst/api.go (340 changed lines)
  32. eth/catalyst/api_test.go (108 changed lines)
  33. eth/catalyst/simulated_beacon.go (2 changed lines)
  34. eth/ethconfig/config.go (3 changed lines)
  35. eth/ethconfig/gen_config.go (6 changed lines)
  36. ethdb/pebble/pebble.go (27 changed lines)
  37. go.mod (2 changed lines)
  38. go.sum (4 changed lines)
  39. internal/ethapi/api.go (14 changed lines)
  40. miner/miner.go (6 changed lines)
  41. miner/payload_building.go (67 changed lines)
  42. miner/payload_building_test.go (2 changed lines)
  43. miner/worker.go (23 changed lines)
  44. p2p/discover/v4_lookup_test.go (26 changed lines)
  45. p2p/enode/nodedb.go (6 changed lines)
  46. params/version.go (2 changed lines)
  47. tests/block_test_util.go (2 changed lines)

@ -20,5 +20,6 @@ les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442
node/ @fjl
p2p/ @fjl @zsfelfoldi
params/ @fjl @holiman @karalabe @gballet @rjl493456442 @zsfelfoldi
rpc/ @fjl @holiman
signer/ @holiman

@ -85,12 +85,13 @@ jobs:
if: type = push
os: osx
osx_image: xcode14.2
go: 1.23.x
go: 1.23.1 # See https://github.com/ethereum/go-ethereum/pull/30478
env:
- azure-osx
git:
submodules: false # avoid cloning ethereum/tests
script:
- ln -sf /Users/travis/gopath/bin/go1.23.1 /usr/local/bin/go # Work around travis go-setup bug
- go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
- go run build/ci.go install -dlgo -arch arm64

@ -19,12 +19,14 @@ func (e ExecutionPayloadEnvelope) MarshalJSON() ([]byte, error) {
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness"`
}
var enc ExecutionPayloadEnvelope
enc.ExecutionPayload = e.ExecutionPayload
enc.BlockValue = (*hexutil.Big)(e.BlockValue)
enc.BlobsBundle = e.BlobsBundle
enc.Override = e.Override
enc.Witness = e.Witness
return json.Marshal(&enc)
}
@ -35,6 +37,7 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
BlockValue *hexutil.Big `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override *bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness"`
}
var dec ExecutionPayloadEnvelope
if err := json.Unmarshal(input, &dec); err != nil {
@ -54,5 +57,8 @@ func (e *ExecutionPayloadEnvelope) UnmarshalJSON(input []byte) error {
if dec.Override != nil {
e.Override = *dec.Override
}
if dec.Witness != nil {
e.Witness = dec.Witness
}
return nil
}

@ -94,6 +94,14 @@ type executableDataMarshaling struct {
ExcessBlobGas *hexutil.Uint64
}
// StatelessPayloadStatusV1 is the result of a stateless payload execution.
type StatelessPayloadStatusV1 struct {
Status string `json:"status"`
StateRoot common.Hash `json:"stateRoot"`
ReceiptsRoot common.Hash `json:"receiptsRoot"`
ValidationError *string `json:"validationError"`
}
//go:generate go run github.com/fjl/gencodec -type ExecutionPayloadEnvelope -field-override executionPayloadEnvelopeMarshaling -out gen_epe.go
type ExecutionPayloadEnvelope struct {
@ -101,6 +109,7 @@ type ExecutionPayloadEnvelope struct {
BlockValue *big.Int `json:"blockValue" gencodec:"required"`
BlobsBundle *BlobsBundleV1 `json:"blobsBundle"`
Override bool `json:"shouldOverrideBuilder"`
Witness *hexutil.Bytes `json:"witness"`
}
type BlobsBundleV1 struct {
@ -115,9 +124,10 @@ type executionPayloadEnvelopeMarshaling struct {
}
type PayloadStatusV1 struct {
Status string `json:"status"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
Status string `json:"status"`
Witness *hexutil.Bytes `json:"witness"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
}
type TransitionConfigurationV1 struct {
@ -198,6 +208,20 @@ func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
// Withdrawals value will propagate through the returned block. Empty
// Withdrawals value must be passed via non-nil, length 0 value in data.
func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
block, err := ExecutableDataToBlockNoHash(data, versionedHashes, beaconRoot)
if err != nil {
return nil, err
}
if block.Hash() != data.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
}
return block, nil
}
// ExecutableDataToBlockNoHash is analogous to ExecutableDataToBlock, but is used
// for stateless execution, so it skips checking if the executable data hashes to
// the requested hash (stateless has to *compute* the root hash, it's not given).
func ExecutableDataToBlockNoHash(data ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (*types.Block, error) {
txs, err := decodeTransactions(data.Transactions)
if err != nil {
return nil, err
@ -267,13 +291,10 @@ func ExecutableDataToBlock(data ExecutableData, versionedHashes []common.Hash, b
ParentBeaconRoot: beaconRoot,
RequestsHash: requestsHash,
}
block := types.NewBlockWithHeader(header)
block = block.WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals, Requests: requests})
block = block.WithWitness(data.ExecutionWitness)
if block.Hash() != data.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", data.BlockHash, block.Hash())
}
return block, nil
return types.NewBlockWithHeader(header).
WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: data.Withdrawals, Requests: requests}).
WithWitness(data.ExecutionWitness),
nil
}
// BlockToExecutableData constructs the ExecutableData structure by filling the

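The hunks above add an optional "witness" field to the payload envelope and status types. A minimal sketch, not part of the diff, of how an engine-API consumer might decode a getPayload response and detect an attached witness (the decodeEnvelope helper and its package wrapper are hypothetical):

package example

import (
    "encoding/json"

    "github.com/ethereum/go-ethereum/beacon/engine"
    "github.com/ethereum/go-ethereum/log"
)

// decodeEnvelope unmarshals an engine_getPayload response and reports whether
// the builder attached the new optional stateless witness.
func decodeEnvelope(raw []byte) (*engine.ExecutionPayloadEnvelope, error) {
    var env engine.ExecutionPayloadEnvelope
    if err := json.Unmarshal(raw, &env); err != nil {
        return nil, err
    }
    if env.Witness != nil {
        log.Info("Payload carries a stateless witness", "bytes", len(*env.Witness))
    }
    return &env, nil
}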
@ -156,7 +156,6 @@ var (
utils.BeaconGenesisRootFlag,
utils.BeaconGenesisTimeFlag,
utils.BeaconCheckpointFlag,
utils.CollectWitnessFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)
rpcFlags = []cli.Flag{

@ -600,11 +600,6 @@ var (
Usage: "Disables db compaction after import",
Category: flags.LoggingCategory,
}
CollectWitnessFlag = &cli.BoolFlag{
Name: "collectwitness",
Usage: "Enable state witness generation during block execution. Work in progress flag, don't use.",
Category: flags.MiscCategory,
}
// MISC settings
SyncTargetFlag = &cli.StringFlag{
@ -1312,7 +1307,6 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error
func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) {
if ctx.IsSet(MinerEtherbaseFlag.Name) {
log.Warn("Option --miner.etherbase is deprecated as the etherbase is set by the consensus client post-merge")
return
}
if !ctx.IsSet(MinerPendingFeeRecipientFlag.Name) {
return
@ -1767,9 +1761,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// TODO(fjl): force-enable this in --dev mode
cfg.EnablePreimageRecording = ctx.Bool(VMEnableDebugFlag.Name)
}
if ctx.IsSet(CollectWitnessFlag.Name) {
cfg.EnableWitnessCollection = ctx.Bool(CollectWitnessFlag.Name)
}
if ctx.IsSet(RPCGlobalGasCapFlag.Name) {
cfg.RPCGasCap = ctx.Uint64(RPCGlobalGasCapFlag.Name)
@ -2194,7 +2185,6 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readonly bool) (*core.BlockCh
}
vmcfg := vm.Config{
EnablePreimageRecording: ctx.Bool(VMEnableDebugFlag.Name),
EnableWitnessCollection: ctx.Bool(CollectWitnessFlag.Name),
}
if ctx.IsSet(VMTraceFlag.Name) {
if name := ctx.String(VMTraceFlag.Name); name != "" {

@ -20,10 +20,8 @@ import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
@ -160,28 +158,6 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
return nil
}
// ValidateWitness cross validates a block execution with stateless remote clients.
//
// Normally we'd distribute the block witness to remote cross validators, wait
// for them to respond and then merge the results. For now, however, it's only
// Geth, so do an internal stateless run.
func (v *BlockValidator) ValidateWitness(witness *stateless.Witness, receiptRoot common.Hash, stateRoot common.Hash) error {
// Run the cross client stateless execution
// TODO(karalabe): Self-stateless for now, swap with other clients
crossReceiptRoot, crossStateRoot, err := ExecuteStateless(v.config, witness)
if err != nil {
return fmt.Errorf("stateless execution failed: %v", err)
}
// Stateless cross execution succeeded, validate the withheld computed fields
if crossReceiptRoot != receiptRoot {
return fmt.Errorf("cross validator receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, receiptRoot)
}
if crossStateRoot != stateRoot {
return fmt.Errorf("cross validator state root mismatch (cross: %x local: %x)", crossStateRoot, stateRoot)
}
return nil
}
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas close to the provided target, and increase it towards
// the target if the baseline gas is lower.

@ -201,7 +201,7 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
t.Fatalf("post-block %d: unexpected result returned: %v", i, result)
case <-time.After(25 * time.Millisecond):
}
chain.InsertBlockWithoutSetHead(postBlocks[i])
chain.InsertBlockWithoutSetHead(postBlocks[i], false)
}
// Verify the blocks with pre-merge blocks and post-merge blocks

@ -78,10 +78,11 @@ var (
snapshotCommitTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
blockInsertTimer = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredResettingTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
blockInsertTimer = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredResettingTimer("chain/validation", nil)
blockCrossValidationTimer = metrics.NewRegisteredResettingTimer("chain/crossvalidation", nil)
blockExecutionTimer = metrics.NewRegisteredResettingTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredResettingTimer("chain/write", nil)
blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
@ -1598,7 +1599,9 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
return 0, errChainStopped
}
defer bc.chainmu.Unlock()
return bc.insertChain(chain, true)
_, n, err := bc.insertChain(chain, true, false) // No witness collection for mass inserts (would get super large)
return n, err
}
// insertChain is the internal implementation of InsertChain, which assumes that
@ -1609,10 +1612,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool, makeWitness bool) (*stateless.Witness, int, error) {
// If the chain is terminating, don't even bother starting up.
if bc.insertStopped() {
return 0, nil
return nil, 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain)
@ -1667,7 +1670,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
for block != nil && bc.skipBlock(err, it) {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, err
return nil, it.index, err
}
lastCanon = block
@ -1681,12 +1684,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
if setHead {
// First block is pruned, insert as sidechain and reorg only if TD grows enough
log.Debug("Pruned ancestor, inserting as sidechain", "number", block.Number(), "hash", block.Hash())
return bc.insertSideChain(block, it)
return bc.insertSideChain(block, it, makeWitness)
} else {
// We're post-merge and the parent is pruned, try to recover the parent state
log.Debug("Pruned ancestor", "number", block.Number(), "hash", block.Hash())
_, err := bc.recoverAncestors(block)
return it.index, err
_, err := bc.recoverAncestors(block, makeWitness)
return nil, it.index, err
}
// Some other error(except ErrKnownBlock) occurred, abort.
// ErrKnownBlock is allowed here since some known blocks
@ -1694,7 +1697,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
case err != nil && !errors.Is(err, ErrKnownBlock):
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, err
return nil, it.index, err
}
// No validation errors for the first block (or chain prefix skipped)
var activeState *state.StateDB
@ -1708,6 +1711,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
}
}()
// Track the singleton witness from this chain insertion (if any)
var witness *stateless.Witness
for ; block != nil && err == nil || errors.Is(err, ErrKnownBlock); block, err = it.next() {
// If the chain is terminating, stop processing blocks
if bc.insertStopped() {
@ -1744,7 +1750,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
"hash", block.Hash(), "number", block.NumberU64())
}
if err := bc.writeKnownBlock(block); err != nil {
return it.index, err
return nil, it.index, err
}
stats.processed++
if bc.logger != nil && bc.logger.OnSkippedBlock != nil {
@ -1755,13 +1761,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
Safe: bc.CurrentSafeBlock(),
})
}
// We can assume that logs are empty here, since the only way for consecutive
// Clique blocks to have the same state is if there are no transactions.
lastCanon = block
continue
}
// Retrieve the parent block and its state to execute on top
start := time.Now()
parent := it.previous()
@ -1770,7 +1774,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
}
statedb, err := state.New(parent.Root, bc.statedb)
if err != nil {
return it.index, err
return nil, it.index, err
}
statedb.SetLogger(bc.logger)
@ -1778,11 +1782,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
// while processing transactions. Before Byzantium the prefetcher is mostly
// useless due to the intermediate root hashing after each transaction.
if bc.chainConfig.IsByzantium(block.Number()) {
var witness *stateless.Witness
if bc.vmConfig.EnableWitnessCollection {
witness, err = stateless.NewWitness(bc, block)
// Generate witnesses either if we're self-testing, or if it's the
// only block being inserted. A bit crude, but witnesses are huge,
// so we refuse to make an entire chain of them.
if bc.vmConfig.StatelessSelfValidation || (makeWitness && len(chain) == 1) {
witness, err = stateless.NewWitness(block.Header(), bc)
if err != nil {
return it.index, err
return nil, it.index, err
}
}
statedb.StartPrefetcher("chain", witness)
@ -1814,7 +1820,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
res, err := bc.processBlock(block, statedb, start, setHead)
followupInterrupt.Store(true)
if err != nil {
return it.index, err
return nil, it.index, err
}
// Report the import stats before returning the various results
stats.processed++
@ -1831,7 +1837,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
// After merge we expect few side chains. Simply count
// all blocks the CL gives us for GC processing time
bc.gcproc += res.procTime
return it.index, nil // Direct block insertion of a single block
return witness, it.index, nil // Direct block insertion of a single block
}
switch res.status {
case CanonStatTy:
@ -1861,7 +1867,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
}
}
stats.ignored += it.remaining()
return it.index, err
return witness, it.index, err
}
// blockProcessingResult is a summary of block processing
@ -1906,13 +1912,36 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
}
vtime := time.Since(vstart)
if witness := statedb.Witness(); witness != nil {
if err = bc.validator.ValidateWitness(witness, block.ReceiptHash(), block.Root()); err != nil {
bc.reportBlock(block, res, err)
return nil, fmt.Errorf("cross verification failed: %v", err)
// If a witness was generated and stateless self-validation was requested, do
// that now. Self validation should *never* run in production, it's more of
// a tight integration to enable running *all* consensus tests through the
// witness builder/runner, which would otherwise be impossible due to the
// various invalid chain states/behaviors being contained in those tests.
xvstart := time.Now()
if witness := statedb.Witness(); witness != nil && bc.vmConfig.StatelessSelfValidation {
log.Warn("Running stateless self-validation", "block", block.Number(), "hash", block.Hash())
// Remove critical computed fields from the block to force true recalculation
context := block.Header()
context.Root = common.Hash{}
context.ReceiptHash = common.Hash{}
task := types.NewBlockWithHeader(context).WithBody(*block.Body())
// Run the stateless self-cross-validation
crossStateRoot, crossReceiptRoot, err := ExecuteStateless(bc.chainConfig, task, witness)
if err != nil {
return nil, fmt.Errorf("stateless self-validation failed: %v", err)
}
if crossStateRoot != block.Root() {
return nil, fmt.Errorf("stateless self-validation root mismatch (cross: %x local: %x)", crossStateRoot, block.Root())
}
if crossReceiptRoot != block.ReceiptHash() {
return nil, fmt.Errorf("stateless self-validation receipt root mismatch (cross: %x local: %x)", crossReceiptRoot, block.ReceiptHash())
}
}
proctime := time.Since(start) // processing + validation
xvtime := time.Since(xvstart)
proctime := time.Since(start) // processing + validation + cross validation
// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
@ -1930,6 +1959,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
blockExecutionTimer.Update(ptime - (statedb.AccountReads + statedb.StorageReads)) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
blockCrossValidationTimer.Update(xvtime) // The time spent on stateless cross validation
// Write the block to the chain and get the status.
var (
@ -1964,7 +1994,7 @@ func (bc *BlockChain) processBlock(block *types.Block, statedb *state.StateDB, s
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
// insertSideChain is only used pre-merge.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator, makeWitness bool) (*stateless.Witness, int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@ -2000,7 +2030,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, errors.New("sidechain ghost-state attack")
return nil, it.index, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@ -2011,7 +2041,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
return it.index, err
return nil, it.index, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@ -2028,7 +2058,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
for parent != nil && !bc.HasState(parent.Root) {
if bc.stateRecoverable(parent.Root) {
if err := bc.triedb.Recover(parent.Root); err != nil {
return 0, err
return nil, 0, err
}
break
}
@ -2038,7 +2068,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, errors.New("missing parent")
return nil, it.index, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@ -2057,30 +2087,30 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, err := bc.insertChain(blocks, true); err != nil {
return 0, err
if _, _, err := bc.insertChain(blocks, true, false); err != nil {
return nil, 0, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if bc.insertStopped() {
log.Debug("Abort during blocks processing")
return 0, nil
return nil, 0, nil
}
}
}
if len(blocks) > 0 {
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, true)
return bc.insertChain(blocks, true, makeWitness)
}
return 0, nil
return nil, 0, nil
}
// recoverAncestors finds the closest ancestor with available state and re-execute
// all the ancestor blocks since that.
// recoverAncestors is only used post-merge.
// We return the hash of the latest block that we could correctly validate.
func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error) {
func (bc *BlockChain) recoverAncestors(block *types.Block, makeWitness bool) (common.Hash, error) {
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
hashes []common.Hash
@ -2120,7 +2150,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error)
} else {
b = bc.GetBlock(hashes[i], numbers[i])
}
if _, err := bc.insertChain(types.Blocks{b}, false); err != nil {
if _, _, err := bc.insertChain(types.Blocks{b}, false, makeWitness && i == 0); err != nil {
return b.ParentHash(), err
}
}
@ -2336,14 +2366,14 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// The key difference between the InsertChain is it won't do the canonical chain
// updating. It relies on the additional SetCanonical call to finalize the entire
// procedure.
func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block, makeWitness bool) (*stateless.Witness, error) {
if !bc.chainmu.TryLock() {
return errChainStopped
return nil, errChainStopped
}
defer bc.chainmu.Unlock()
_, err := bc.insertChain(types.Blocks{block}, false)
return err
witness, _, err := bc.insertChain(types.Blocks{block}, false, makeWitness)
return witness, err
}
// SetCanonical rewinds the chain to set the new head block as the specified
@ -2357,7 +2387,7 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
// Re-execute the reorged chain in case the head state is missing.
if !bc.HasState(head.Root()) {
if latestValidHash, err := bc.recoverAncestors(head); err != nil {
if latestValidHash, err := bc.recoverAncestors(head, false); err != nil {
return latestValidHash, err
}
log.Info("Recovered head state", "number", head.Number(), "hash", head.Hash())

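A sketch of the reworked single-block insertion path above, assuming an already-constructed *core.BlockChain; the insertWithWitness wrapper is illustrative. InsertBlockWithoutSetHead now takes a makeWitness flag and returns the witness recorded for that one block, while mass inserts via InsertChain never produce one:

package example

import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/stateless"
    "github.com/ethereum/go-ethereum/core/types"
)

// insertWithWitness inserts a single block without updating the head and asks
// the chain to record an execution witness for it.
func insertWithWitness(chain *core.BlockChain, block *types.Block) (*stateless.Witness, error) {
    witness, err := chain.InsertBlockWithoutSetHead(block, true)
    if err != nil {
        return nil, err
    }
    // witness may still be nil, e.g. for pre-Byzantium blocks or when the
    // insertion short-circuited on an already-known block.
    return witness, nil
}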
@ -1757,8 +1757,8 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump(true))
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(tt.dump(false))
// Create a temporary persistent database
datadir := t.TempDir()
@ -1908,7 +1908,7 @@ func TestIssue23496(t *testing.T) {
func testIssue23496(t *testing.T, scheme string) {
// It's hard to follow the test case, visualize the input
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// Create a temporary persistent database
datadir := t.TempDir()

@ -1961,7 +1961,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(tt.dump(false))
// Create a temporary persistent database

@ -222,8 +222,8 @@ type snapshotTest struct {
func (snaptest *snapshotTest) test(t *testing.T) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(snaptest.dump())
chain, blocks := snaptest.prepare(t)
// Restart the chain normally
@ -245,8 +245,8 @@ type crashSnapshotTest struct {
func (snaptest *crashSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(snaptest.dump())
chain, blocks := snaptest.prepare(t)
// Pull the plug on the database, simulating a hard crash
@ -297,8 +297,8 @@ type gappedSnapshotTest struct {
func (snaptest *gappedSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(snaptest.dump())
chain, blocks := snaptest.prepare(t)
// Insert blocks without enabling snapshot if gapping is required.
@ -341,8 +341,8 @@ type setHeadSnapshotTest struct {
func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(snaptest.dump())
chain, blocks := snaptest.prepare(t)
// Rewind the chain if setHead operation is required.
@ -370,8 +370,8 @@ type wipeCrashSnapshotTest struct {
func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case, visualize the input
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
// fmt.Println(snaptest.dump())
chain, blocks := snaptest.prepare(t)
// Firstly, stop the chain properly, with all snapshot journal

@ -2126,9 +2126,9 @@ func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommon
// [ Cn, Cn+1, Cc, Sn+3 ... Sm]
// ^ ^ ^ pruned
func TestPrunedImportSide(t *testing.T) {
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
//glogger.Verbosity(3)
//log.Root().SetHandler(log.Handler(glogger))
// glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
// glogger.Verbosity(3)
// log.SetDefault(log.NewLogger(glogger))
testSideImport(t, 3, 3, -1)
testSideImport(t, 3, -3, -1)
testSideImport(t, 10, 0, -1)
@ -2137,9 +2137,9 @@ func TestPrunedImportSide(t *testing.T) {
}
func TestPrunedImportSideWithMerging(t *testing.T) {
//glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false)))
//glogger.Verbosity(3)
//log.Root().SetHandler(log.Handler(glogger))
// glogger := log.NewGlogHandler(log.NewTerminalHandler(os.Stderr, false))
// glogger.Verbosity(3)
// log.SetDefault(log.NewLogger(glogger))
testSideImport(t, 3, 3, 0)
testSideImport(t, 3, -3, 0)
testSideImport(t, 10, 0, 0)
@ -3629,7 +3629,7 @@ func TestSetCanonical(t *testing.T) {
}
func testSetCanonical(t *testing.T, scheme string) {
//log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true)))
var (
key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
@ -3674,7 +3674,7 @@ func testSetCanonical(t *testing.T, scheme string) {
gen.AddTx(tx)
})
for _, block := range side {
err := chain.InsertBlockWithoutSetHead(block)
_, err := chain.InsertBlockWithoutSetHead(block, false)
if err != nil {
t.Fatalf("Failed to insert into chain: %v", err)
}

@ -591,7 +591,7 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
// Pre-deploy EIP-4788 system contract
params.BeaconRootsAddress: {Nonce: 1, Code: params.BeaconRootsCode, Balance: common.Big0},
// Pre-deploy EIP-2935 history contract.
params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode},
params.HistoryStorageAddress: {Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0},
},
}
if faucet != nil {

@ -302,6 +302,10 @@ func ParseStateScheme(provided string, disk ethdb.Database) (string, error) {
log.Info("State scheme set to already existing", "scheme", stored)
return stored, nil // reuse scheme of persistent scheme
}
// If state scheme is specified, ensure it's valid.
if provided != HashScheme && provided != PathScheme {
return "", fmt.Errorf("invalid state scheme %s", provided)
}
// If state scheme is specified, ensure it's compatible with
// persistent state.
if stored == "" || provided == stored {

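A small sketch of the stricter scheme validation added above, assuming an open ethdb.Database; the resolveScheme wrapper is illustrative. Any value other than the hash or path scheme now fails immediately instead of being compared against the persisted scheme:

package example

import (
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
)

// resolveScheme asks rawdb which state scheme to run with; an unknown scheme
// string now errors up front rather than silently falling through.
func resolveScheme(db ethdb.Database) (string, error) {
    return rawdb.ParseStateScheme(rawdb.PathScheme, db)
}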
@ -28,8 +28,8 @@ import (
// mode specifies how a tree location has been accessed
// for the byte value:
// * the first bit is set if the branch has been edited
// * the second bit is set if the branch has been read
// * the first bit is set if the branch has been read
// * the second bit is set if the branch has been edited
type mode byte
const (

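To make the corrected bit semantics concrete, a hypothetical illustration (the constant and function names below are not from the diff): bit zero records that a branch was read, bit one that it was edited:

package example

// Hypothetical names mirroring the documented layout of the access mode byte.
const (
    branchReadBit  = 1 << 0 // set once the branch has been read
    branchWriteBit = 1 << 1 // set once the branch has been edited
)

// markRead flags a read access on top of whatever was recorded before.
func markRead(m byte) byte { return m | branchReadBit }

// markWrite flags an edit access on top of whatever was recorded before.
func markWrite(m byte) byte { return m | branchWriteBit }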
@ -1265,7 +1265,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
}
if !ret.empty() {
// If snapshotting is enabled, update the snapshot tree with this new version
if snap := s.db.Snapshot(); snap != nil {
if snap := s.db.Snapshot(); snap != nil && snap.Snapshot(ret.originRoot) != nil {
start := time.Now()
if err := snap.Update(ret.root, ret.originRoot, ret.destructs, ret.accounts, ret.storages); err != nil {
log.Warn("Failed to update snapshot tree", "from", ret.originRoot, "to", ret.root, "err", err)

@ -25,21 +25,30 @@ import (
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
)
// ExecuteStateless runs a stateless execution based on a witness, verifies
// everything it can locally and returns the two computed fields that need the
// other side to explicitly check.
// everything it can locally and returns the state root and receipt root, that
// need the other side to explicitly check.
//
// This method is a bit of a sore thumb here, but:
// - It cannot be placed in core/stateless, because state.New produces a circular dep
// - It cannot be placed outside of core, because it needs to construct a dud headerchain
//
// TODO(karalabe): Would be nice to resolve both issues above somehow and move it.
func ExecuteStateless(config *params.ChainConfig, witness *stateless.Witness) (common.Hash, common.Hash, error) {
func ExecuteStateless(config *params.ChainConfig, block *types.Block, witness *stateless.Witness) (common.Hash, common.Hash, error) {
// Sanity check if the supplied block accidentally contains a set root or
// receipt hash. If so, be very loud, but still continue.
if block.Root() != (common.Hash{}) {
log.Error("stateless runner received state root it's expected to calculate (faulty consensus client)", "block", block.Number())
}
if block.ReceiptHash() != (common.Hash{}) {
log.Error("stateless runner received receipt root it's expected to calculate (faulty consensus client)", "block", block.Number())
}
// Create and populate the state database to serve as the stateless backend
memdb := witness.MakeHashDB()
db, err := state.New(witness.Root(), state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
@ -57,16 +66,15 @@ func ExecuteStateless(config *params.ChainConfig, witness *stateless.Witness) (c
validator := NewBlockValidator(config, nil) // No chain, we only validate the state, not the block
// Run the stateless blocks processing and self-validate certain fields
res, err := processor.Process(witness.Block, db, vm.Config{})
res, err := processor.Process(block, db, vm.Config{})
if err != nil {
return common.Hash{}, common.Hash{}, err
}
if err = validator.ValidateState(witness.Block, db, res, true); err != nil {
if err = validator.ValidateState(block, db, res, true); err != nil {
return common.Hash{}, common.Hash{}, err
}
// Almost everything validated, but receipt and state root needs to be returned
receiptRoot := types.DeriveSha(res.Receipts, trie.NewStackTrie(nil))
stateRoot := db.IntermediateRoot(config.IsEIP158(witness.Block.Number()))
return receiptRoot, stateRoot, nil
stateRoot := db.IntermediateRoot(config.IsEIP158(block.Number()))
return stateRoot, receiptRoot, nil
}

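A sketch of driving the reworked ExecuteStateless from outside, mirroring what processBlock does in the blockchain.go hunk; the crossValidate wrapper is illustrative and assumes the caller holds the original block plus its witness. The state and receipt roots are stripped first, since the stateless run is expected to recompute them:

package example

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/stateless"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/params"
)

// crossValidate re-executes a block statelessly and checks the recomputed
// roots against the locally known ones.
func crossValidate(config *params.ChainConfig, block *types.Block, witness *stateless.Witness) error {
    // Strip the fields the stateless runner must compute itself.
    header := block.Header()
    header.Root = common.Hash{}
    header.ReceiptHash = common.Hash{}
    task := types.NewBlockWithHeader(header).WithBody(*block.Body())

    stateRoot, receiptRoot, err := core.ExecuteStateless(config, task, witness)
    if err != nil {
        return fmt.Errorf("stateless execution failed: %v", err)
    }
    if stateRoot != block.Root() {
        return fmt.Errorf("state root mismatch (cross: %x local: %x)", stateRoot, block.Root())
    }
    if receiptRoot != block.ReceiptHash() {
        return fmt.Errorf("receipt root mismatch (cross: %x local: %x)", receiptRoot, block.ReceiptHash())
    }
    return nil
}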
@ -26,6 +26,13 @@ import (
// MakeHashDB imports tries, codes and block hashes from a witness into a new
// hash-based memory db. We could eventually rewrite this into a pathdb, but
// simple is better for now.
//
// Note, this hashdb approach is quite strictly self-validating:
// - Headers are persisted keyed by hash, so blockhash will error on junk
// - Codes are persisted keyed by hash, so bytecode lookup will error on junk
// - Trie nodes are persisted keyed by hash, so trie expansion will error on junk
//
// Acceleration structures built would need to explicitly validate the witness.
func (w *Witness) MakeHashDB() ethdb.Database {
var (
memdb = rawdb.NewMemoryDatabase()

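The self-validating property described above means a witness can simply be dumped into a throwaway hash-keyed database and opened as a regular pre-state. A minimal sketch mirroring the body of ExecuteStateless in this diff (the openWitnessState helper is illustrative):

package example

import (
    "github.com/ethereum/go-ethereum/core/state"
    "github.com/ethereum/go-ethereum/core/stateless"
    "github.com/ethereum/go-ethereum/triedb"
)

// openWitnessState materializes a witness into an in-memory hash database and
// opens the pre-state root recorded in its parent header.
func openWitnessState(witness *stateless.Witness) (*state.StateDB, error) {
    memdb := witness.MakeHashDB()
    return state.New(witness.Root(), state.NewDatabase(triedb.NewDatabase(memdb, triedb.HashDefaults), nil))
}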
@ -17,42 +17,31 @@
package stateless
import (
"bytes"
"errors"
"fmt"
"io"
"slices"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
//go:generate go run github.com/fjl/gencodec -type extWitness -field-override extWitnessMarshalling -out gen_encoding_json.go
// toExtWitness converts our internal witness representation to the consensus one.
func (w *Witness) toExtWitness() *extWitness {
ext := &extWitness{
Block: w.Block,
Headers: w.Headers,
}
ext.Codes = make([][]byte, 0, len(w.Codes))
for code := range w.Codes {
ext.Codes = append(ext.Codes, []byte(code))
}
slices.SortFunc(ext.Codes, bytes.Compare)
ext.State = make([][]byte, 0, len(w.State))
for node := range w.State {
ext.State = append(ext.State, []byte(node))
}
slices.SortFunc(ext.State, bytes.Compare)
return ext
}
// fromExtWitness converts the consensus witness format into our internal one.
func (w *Witness) fromExtWitness(ext *extWitness) error {
w.Block, w.Headers = ext.Block, ext.Headers
w.Headers = ext.Headers
w.Codes = make(map[string]struct{}, len(ext.Codes))
for _, code := range ext.Codes {
@ -62,12 +51,7 @@ func (w *Witness) fromExtWitness(ext *extWitness) error {
for _, node := range ext.State {
w.State[string(node)] = struct{}{}
}
return w.sanitize()
}
// MarshalJSON marshals a witness as JSON.
func (w *Witness) MarshalJSON() ([]byte, error) {
return w.toExtWitness().MarshalJSON()
return nil
}
// EncodeRLP serializes a witness as RLP.
@ -75,15 +59,6 @@ func (w *Witness) EncodeRLP(wr io.Writer) error {
return rlp.Encode(wr, w.toExtWitness())
}
// UnmarshalJSON unmarshals from JSON.
func (w *Witness) UnmarshalJSON(input []byte) error {
var ext extWitness
if err := ext.UnmarshalJSON(input); err != nil {
return err
}
return w.fromExtWitness(&ext)
}
// DecodeRLP decodes a witness from RLP.
func (w *Witness) DecodeRLP(s *rlp.Stream) error {
var ext extWitness
@ -93,37 +68,9 @@ func (w *Witness) DecodeRLP(s *rlp.Stream) error {
return w.fromExtWitness(&ext)
}
// sanitize checks for some mandatory fields in the witness after decoding so
// the rest of the code can assume invariants and doesn't have to deal with
// corrupted data.
func (w *Witness) sanitize() error {
// Verify that the "parent" header (i.e. index 0) is available, and is the
// true parent of the block-to-be executed, since we use that to link the
// current block to the pre-state.
if len(w.Headers) == 0 {
return errors.New("parent header (for pre-root hash) missing")
}
for i, header := range w.Headers {
if header == nil {
return fmt.Errorf("witness header nil at position %d", i)
}
}
if w.Headers[0].Hash() != w.Block.ParentHash() {
return fmt.Errorf("parent hash different: witness %v, block parent %v", w.Headers[0].Hash(), w.Block.ParentHash())
}
return nil
}
// extWitness is a witness RLP encoding for transferring across clients.
type extWitness struct {
Block *types.Block `json:"block" gencodec:"required"`
Headers []*types.Header `json:"headers" gencodec:"required"`
Codes [][]byte `json:"codes"`
State [][]byte `json:"state"`
}
// extWitnessMarshalling defines the hex marshalling types for a witness.
type extWitnessMarshalling struct {
Codes []hexutil.Bytes
State []hexutil.Bytes
Headers []*types.Header
Codes [][]byte
State [][]byte
}

@ -1,74 +0,0 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package stateless
import (
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
var _ = (*extWitnessMarshalling)(nil)
// MarshalJSON marshals as JSON.
func (e extWitness) MarshalJSON() ([]byte, error) {
type extWitness struct {
Block *types.Block `json:"block" gencodec:"required"`
Headers []*types.Header `json:"headers" gencodec:"required"`
Codes []hexutil.Bytes `json:"codes"`
State []hexutil.Bytes `json:"state"`
}
var enc extWitness
enc.Block = e.Block
enc.Headers = e.Headers
if e.Codes != nil {
enc.Codes = make([]hexutil.Bytes, len(e.Codes))
for k, v := range e.Codes {
enc.Codes[k] = v
}
}
if e.State != nil {
enc.State = make([]hexutil.Bytes, len(e.State))
for k, v := range e.State {
enc.State[k] = v
}
}
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (e *extWitness) UnmarshalJSON(input []byte) error {
type extWitness struct {
Block *types.Block `json:"block" gencodec:"required"`
Headers []*types.Header `json:"headers" gencodec:"required"`
Codes []hexutil.Bytes `json:"codes"`
State []hexutil.Bytes `json:"state"`
}
var dec extWitness
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.Block == nil {
return errors.New("missing required field 'block' for extWitness")
}
e.Block = dec.Block
if dec.Headers == nil {
return errors.New("missing required field 'headers' for extWitness")
}
e.Headers = dec.Headers
if dec.Codes != nil {
e.Codes = make([][]byte, len(dec.Codes))
for k, v := range dec.Codes {
e.Codes[k] = v
}
}
if dec.State != nil {
e.State = make([][]byte, len(dec.State))
for k, v := range dec.State {
e.State[k] = v
}
}
return nil
}

@ -17,16 +17,13 @@
package stateless
import (
"bytes"
"errors"
"fmt"
"maps"
"slices"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
// HeaderReader is an interface to pull in headers in place of block hashes for
@ -36,10 +33,11 @@ type HeaderReader interface {
GetHeader(hash common.Hash, number uint64) *types.Header
}
// Witness encompasses a block, state and any other chain data required to apply
// a set of transactions and derive a post state/receipt root.
// Witness encompasses the state required to apply a set of transactions and
// derive a post state/receipt root.
type Witness struct {
Block *types.Block // Current block with rootHash and receiptHash zeroed out
context *types.Header // Header to which this witness belongs to, with rootHash and receiptHash zeroed out
Headers []*types.Header // Past headers in reverse order (0=parent, 1=parent's-parent, etc). First *must* be set.
Codes map[string]struct{} // Set of bytecodes ran or accessed
State map[string]struct{} // Set of MPT state trie nodes (account and storage together)
@ -49,24 +47,23 @@ type Witness struct {
}
// NewWitness creates an empty witness ready for population.
func NewWitness(chain HeaderReader, block *types.Block) (*Witness, error) {
// Zero out the result fields to avoid accidentally sending them to the verifier
header := block.Header()
header.Root = common.Hash{}
header.ReceiptHash = common.Hash{}
// Retrieve the parent header, which will *always* be included to act as a
// trustless pre-root hash container
parent := chain.GetHeader(block.ParentHash(), block.NumberU64()-1)
if parent == nil {
return nil, errors.New("failed to retrieve parent header")
func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
// When building witnesses, retrieve the parent header, which will *always*
// be included to act as a trustless pre-root hash container
var headers []*types.Header
if chain != nil {
parent := chain.GetHeader(context.ParentHash, context.Number.Uint64()-1)
if parent == nil {
return nil, errors.New("failed to retrieve parent header")
}
headers = append(headers, parent)
}
// Create the witness with a reconstructed gutted out block
return &Witness{
Block: types.NewBlockWithHeader(header).WithBody(*block.Body()),
context: context,
Headers: headers,
Codes: make(map[string]struct{}),
State: make(map[string]struct{}),
Headers: []*types.Header{parent},
chain: chain,
}, nil
}
@ -76,11 +73,8 @@ func NewWitness(chain HeaderReader, block *types.Block) (*Witness, error) {
// the chain to cover the block being added.
func (w *Witness) AddBlockHash(number uint64) {
// Keep pulling in headers until this hash is populated
for int(w.Block.NumberU64()-number) > len(w.Headers) {
tail := w.Block.Header()
if len(w.Headers) > 0 {
tail = w.Headers[len(w.Headers)-1]
}
for int(w.context.Number.Uint64()-number) > len(w.Headers) {
tail := w.Headers[len(w.Headers)-1]
w.Headers = append(w.Headers, w.chain.GetHeader(tail.ParentHash, tail.Number.Uint64()-1))
}
}
@ -107,45 +101,16 @@ func (w *Witness) AddState(nodes map[string]struct{}) {
// Copy deep-copies the witness object. Witness.Block isn't deep-copied as it
// is never mutated by Witness
func (w *Witness) Copy() *Witness {
return &Witness{
Block: w.Block,
cpy := &Witness{
Headers: slices.Clone(w.Headers),
Codes: maps.Clone(w.Codes),
State: maps.Clone(w.State),
chain: w.chain,
}
}
// String prints a human-readable summary containing the total size of the
// witness and the sizes of the underlying components
func (w *Witness) String() string {
blob, _ := rlp.EncodeToBytes(w)
bytesTotal := len(blob)
blob, _ = rlp.EncodeToBytes(w.Block)
bytesBlock := len(blob)
bytesHeaders := 0
for _, header := range w.Headers {
blob, _ = rlp.EncodeToBytes(header)
bytesHeaders += len(blob)
}
bytesCodes := 0
for code := range w.Codes {
bytesCodes += len(code)
if w.context != nil {
cpy.context = types.CopyHeader(w.context)
}
bytesState := 0
for node := range w.State {
bytesState += len(node)
}
buf := new(bytes.Buffer)
fmt.Fprintf(buf, "Witness #%d: %v\n", w.Block.Number(), common.StorageSize(bytesTotal))
fmt.Fprintf(buf, " block (%4d txs): %10v\n", len(w.Block.Transactions()), common.StorageSize(bytesBlock))
fmt.Fprintf(buf, "%4d headers: %10v\n", len(w.Headers), common.StorageSize(bytesHeaders))
fmt.Fprintf(buf, "%4d trie nodes: %10v\n", len(w.State), common.StorageSize(bytesState))
fmt.Fprintf(buf, "%4d codes: %10v\n", len(w.Codes), common.StorageSize(bytesCodes))
return buf.String()
return cpy
}
// Root returns the pre-state root from the first header.

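A sketch of the new constructor shape, assuming a chain that satisfies stateless.HeaderReader; the buildWitness helper is illustrative. The witness is now anchored to the header being executed (the "context") rather than carrying a whole gutted block, and the parent header is pulled in automatically as the pre-root container:

package example

import (
    "github.com/ethereum/go-ethereum/core/stateless"
    "github.com/ethereum/go-ethereum/core/types"
)

// buildWitness creates an empty witness for the given block, ready to be
// populated with touched codes and trie nodes during execution.
func buildWitness(chain stateless.HeaderReader, block *types.Block) (*stateless.Witness, error) {
    // Passing a nil chain is also allowed now (e.g. when decoding a received
    // witness), in which case no parent header is attached up front.
    return stateless.NewWitness(block.Header(), chain)
}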
@ -34,6 +34,7 @@ type OpContext interface {
Address() common.Address
CallValue() *uint256.Int
CallInput() []byte
ContractCode() []byte
}
// StateDB gives tracers access to the whole state.

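With ContractCode added to the OpContext interface, a tracer hook can inspect the running contract's bytecode. A minimal sketch, assuming the OnOpcode hook shape from the tracing package in this release (the hook body and constructor name are illustrative):

package example

import (
    "github.com/ethereum/go-ethereum/core/tracing"
    "github.com/ethereum/go-ethereum/log"
)

// newCodeSizeTracer returns hooks that log the size of the executing
// contract's code on the first opcode of every call frame.
func newCodeSizeTracer() *tracing.Hooks {
    return &tracing.Hooks{
        OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
            if pc == 0 {
                log.Info("Executing contract", "addr", scope.Address(), "codeSize", len(scope.ContractCode()))
            }
        },
    }
}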
@ -949,9 +949,7 @@ func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*
lost = append(lost, tx)
}
}
if len(lost) > 0 {
reinject[addr] = lost
}
reinject[addr] = lost
// Update the set that was already reincluded to track the blocks in limbo
for _, tx := range types.TxDifference(included[addr], discarded[addr]) {

@ -27,7 +27,6 @@ import (
"path/filepath"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
@ -53,25 +52,14 @@ var (
emptyBlobVHash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit)
)
// Chain configuration with Cancun enabled.
//
// TODO(karalabe): replace with params.MainnetChainConfig after Cancun.
var testChainConfig *params.ChainConfig
func init() {
testChainConfig = new(params.ChainConfig)
*testChainConfig = *params.MainnetChainConfig
testChainConfig.CancunTime = new(uint64)
*testChainConfig.CancunTime = uint64(time.Now().Unix())
}
// testBlockChain is a mock of the live chain for testing the pool.
type testBlockChain struct {
config *params.ChainConfig
basefee *uint256.Int
blobfee *uint256.Int
statedb *state.StateDB
blocks map[uint64]*types.Block
}
func (bc *testBlockChain) Config() *params.ChainConfig {
@ -79,7 +67,7 @@ func (bc *testBlockChain) Config() *params.ChainConfig {
}
func (bc *testBlockChain) CurrentBlock() *types.Header {
// Yolo, life is too short to invert mist.CalcBaseFee and misc.CalcBlobFee,
// Yolo, life is too short to invert misc.CalcBaseFee and misc.CalcBlobFee,
// just binary search them.
// The base fee at 5714 ETH translates into the 21000 base gas higher than
@ -142,7 +130,14 @@ func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
}
func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
return nil
// This is very yolo for the tests. If the number is the origin block we use
// to init the pool, return an empty block with the correct starting header.
//
// If it is something else, return some baked in mock data.
if number == bc.config.LondonBlock.Uint64()+1 {
return types.NewBlockWithHeader(bc.CurrentBlock())
}
return bc.blocks[number]
}
func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
@ -181,14 +176,14 @@ func makeAddressReserver() txpool.AddressReserver {
// the blob pool.
func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
blobtx := makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap)
return types.MustSignNewTx(key, types.LatestSigner(testChainConfig), blobtx)
return types.MustSignNewTx(key, types.LatestSigner(params.MainnetChainConfig), blobtx)
}
// makeUnsignedTx is a utility method to construct a random blob transaction
// without signing it.
func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
return &types.BlobTx{
ChainID: uint256.MustFromBig(testChainConfig.ChainID),
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
Nonce: nonce,
GasTipCap: uint256.NewInt(gasTipCap),
GasFeeCap: uint256.NewInt(gasFeeCap),
@ -229,13 +224,17 @@ func verifyPoolInternals(t *testing.T, pool *BlobPool) {
for hash := range seen {
t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
}
// Verify that transactions are sorted per account and contain no nonce gaps
// Verify that transactions are sorted per account and contain no nonce gaps,
// and that the first nonce is the next expected one based on the state.
for addr, txs := range pool.index {
for i := 1; i < len(txs); i++ {
if txs[i].nonce != txs[i-1].nonce+1 {
t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
}
}
if txs[0].nonce != pool.state.GetNonce(addr) {
t.Errorf("addr %v, first tx nonce mismatch: have %d, want %d", addr, txs[0].nonce, pool.state.GetNonce(addr))
}
}
// Verify that calculated evacuation thresholds are correct
for addr, txs := range pool.index {
@ -331,7 +330,7 @@ func TestOpenDrops(t *testing.T) {
// Insert a transaction with a bad signature to verify that stale junk after
// potential hard-forks can get evicted (case 2)
tx := types.NewTx(&types.BlobTx{
ChainID: uint256.MustFromBig(testChainConfig.ChainID),
ChainID: uint256.MustFromBig(params.MainnetChainConfig.ChainID),
GasTipCap: new(uint256.Int),
GasFeeCap: new(uint256.Int),
Gas: 0,
@ -560,7 +559,7 @@ func TestOpenDrops(t *testing.T) {
statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(params.InitialBaseFee),
blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
statedb: statedb,
@ -679,7 +678,7 @@ func TestOpenIndex(t *testing.T) {
statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(params.InitialBaseFee),
blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice),
statedb: statedb,
@ -781,7 +780,7 @@ func TestOpenHeap(t *testing.T) {
statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(1050),
blobfee: uint256.NewInt(105),
statedb: statedb,
@ -861,7 +860,7 @@ func TestOpenCap(t *testing.T) {
statedb.Commit(0, true)
chain := &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(1050),
blobfee: uint256.NewInt(105),
statedb: statedb,
@ -908,13 +907,12 @@ func TestOpenCap(t *testing.T) {
func TestAdd(t *testing.T) {
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelTrace, true)))
// seed is a helper tumpe to seed an initial state db and pool
// seed is a helper tuple to seed an initial state db and pool
type seed struct {
balance uint64
nonce uint64
txs []*types.BlobTx
}
// addtx is a helper sender/tx tuple to represent a new tx addition
type addtx struct {
from string
@ -925,6 +923,7 @@ func TestAdd(t *testing.T) {
tests := []struct {
seeds map[string]seed
adds []addtx
block []addtx
}{
// Transactions from new accounts should be accepted if their initial
// nonce matches the expected one from the statedb. Higher or lower must
@ -1250,6 +1249,25 @@ func TestAdd(t *testing.T) {
},
},
},
// Tests issue #30518 where a refactor broke internal state invariants,
// causing included transactions not to be properly accounted and thus
// account states going out of sync with the chain.
{
seeds: map[string]seed{
"alice": {
balance: 1000000,
txs: []*types.BlobTx{
makeUnsignedTx(0, 1, 1, 1),
},
},
},
block: []addtx{
{
from: "alice",
tx: makeUnsignedTx(0, 1, 1, 1),
},
},
},
}
for i, tt := range tests {
// Create a temporary folder for the persistent backend
@ -1276,7 +1294,7 @@ func TestAdd(t *testing.T) {
// Sign the seed transactions and store them in the data store
for _, tx := range seed.txs {
signed := types.MustSignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
signed := types.MustSignNewTx(keys[acc], types.LatestSigner(params.MainnetChainConfig), tx)
blob, _ := rlp.EncodeToBytes(signed)
store.Put(blob)
}
@ -1286,7 +1304,7 @@ func TestAdd(t *testing.T) {
// Create a blob pool out of the pre-seeded data
chain := &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(1050),
blobfee: uint256.NewInt(105),
statedb: statedb,
@ -1299,14 +1317,40 @@ func TestAdd(t *testing.T) {
// Add each transaction one by one, verifying the pool internals in between
for j, add := range tt.adds {
signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(params.MainnetChainConfig), add.tx)
if err := pool.add(signed); !errors.Is(err, add.err) {
t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
}
verifyPoolInternals(t, pool)
}
// Verify the pool internals and close down the test
verifyPoolInternals(t, pool)
// If the test contains a chain head event, run that and again verify the internals
if tt.block != nil {
// Fake a header for the new set of transactions
header := &types.Header{
Number: big.NewInt(int64(chain.CurrentBlock().Number.Uint64() + 1)),
BaseFee: chain.CurrentBlock().BaseFee, // invalid, but nothing checks it, yolo
}
// Inject the fake block into the chain
txs := make([]*types.Transaction, len(tt.block))
for j, inc := range tt.block {
txs[j] = types.MustSignNewTx(keys[inc.from], types.LatestSigner(params.MainnetChainConfig), inc.tx)
}
chain.blocks = map[uint64]*types.Block{
header.Number.Uint64(): types.NewBlockWithHeader(header).WithBody(types.Body{
Transactions: txs,
}),
}
// Apply the nonce updates to the state db
for _, tx := range txs {
sender, _ := types.Sender(types.LatestSigner(params.MainnetChainConfig), tx)
chain.statedb.SetNonce(sender, tx.Nonce()+1)
}
pool.Reset(chain.CurrentBlock(), header)
verifyPoolInternals(t, pool)
}
// Close down the test
pool.Close()
}
}
@ -1325,10 +1369,10 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
var (
basefee = uint64(1050)
blobfee = uint64(105)
signer = types.LatestSigner(testChainConfig)
signer = types.LatestSigner(params.MainnetChainConfig)
statedb, _ = state.New(types.EmptyRootHash, state.NewDatabaseForTesting())
chain = &testBlockChain{
config: testChainConfig,
config: params.MainnetChainConfig,
basefee: uint256.NewInt(basefee),
blobfee: uint256.NewInt(blobfee),
statedb: statedb,

@ -19,9 +19,7 @@ package core
import (
"sync/atomic"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
)
@ -35,9 +33,6 @@ type Validator interface {
// ValidateState validates the given statedb and optionally the process result.
ValidateState(block *types.Block, state *state.StateDB, res *ProcessResult, stateless bool) error
// ValidateWitness cross validates a block execution with stateless remote clients.
ValidateWitness(witness *stateless.Witness, receiptRoot common.Hash, stateRoot common.Hash) error
}
// Prefetcher is an interface for pre-caching transaction signatures and state.

@ -33,7 +33,8 @@ type Config struct {
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
ExtraEips []int // Additional EIPS that are to be enabled
EnableWitnessCollection bool // true if witness collection is enabled
StatelessSelfValidation bool // Generate execution witnesses and self-check against them (testing purpose)
}
// ScopeContext contains the things that are per-call, such as stack and memory,
@ -83,6 +84,11 @@ func (ctx *ScopeContext) CallInput() []byte {
return ctx.Contract.Input
}
// ContractCode returns the code of the contract being executed.
func (ctx *ScopeContext) ContractCode() []byte {
return ctx.Contract.Code
}
// EVMInterpreter represents an EVM interpreter
type EVMInterpreter struct {
evm *EVM

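The self-check implied by the renamed StatelessSelfValidation flag amounts to re-executing a block against its own witness and comparing the recomputed roots with the sealed header. A hedged sketch using core.ExecuteStateless, with the crossValidate helper being illustrative only:

package statelesscheck

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/stateless"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// crossValidate re-runs a block statelessly on top of its witness and checks
// that the recomputed state and receipt roots match the block header.
func crossValidate(config *params.ChainConfig, block *types.Block, witness *stateless.Witness) error {
	stateRoot, receiptRoot, err := core.ExecuteStateless(config, block, witness)
	if err != nil {
		return err
	}
	if stateRoot != block.Root() {
		return fmt.Errorf("state root mismatch: have %x, want %x", stateRoot, block.Root())
	}
	if receiptRoot != block.ReceiptHash() {
		return fmt.Errorf("receipt root mismatch: have %x, want %x", receiptRoot, block.ReceiptHash())
	}
	return nil
}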
File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

@ -183,7 +183,6 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
var (
vmConfig = vm.Config{
EnablePreimageRecording: config.EnablePreimageRecording,
EnableWitnessCollection: config.EnableWitnessCollection,
}
cacheConfig = &core.CacheConfig{
TrieCleanLimit: config.TrieCleanCache,

@ -27,7 +27,9 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
@ -37,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/params/forks"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
)
@ -82,6 +85,9 @@ var caps = []string{
"engine_forkchoiceUpdatedV1",
"engine_forkchoiceUpdatedV2",
"engine_forkchoiceUpdatedV3",
"engine_forkchoiceUpdatedWithWitnessV1",
"engine_forkchoiceUpdatedWithWitnessV2",
"engine_forkchoiceUpdatedWithWitnessV3",
"engine_exchangeTransitionConfigurationV1",
"engine_getPayloadV1",
"engine_getPayloadV2",
@ -91,6 +97,14 @@ var caps = []string{
"engine_newPayloadV2",
"engine_newPayloadV3",
"engine_newPayloadV4",
"engine_newPayloadWithWitnessV1",
"engine_newPayloadWithWitnessV2",
"engine_newPayloadWithWitnessV3",
"engine_newPayloadWithWitnessV4",
"engine_executeStatelessPayloadV1",
"engine_executeStatelessPayloadV2",
"engine_executeStatelessPayloadV3",
"engine_executeStatelessPayloadV4",
"engine_getPayloadBodiesByHashV1",
"engine_getPayloadBodiesByHashV2",
"engine_getPayloadBodiesByRangeV1",
@ -188,7 +202,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV1(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("forkChoiceUpdateV1 called post-shanghai"))
}
}
return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1)
return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1, false)
}
// ForkchoiceUpdatedV2 is equivalent to V1 with the addition of withdrawals in the payload
@ -211,7 +225,7 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV2(update engine.ForkchoiceStateV1, pa
return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called with paris and shanghai payloads"))
}
}
return api.forkchoiceUpdated(update, params, engine.PayloadV2)
return api.forkchoiceUpdated(update, params, engine.PayloadV2, false)
}
// ForkchoiceUpdatedV3 is equivalent to V2 with the addition of parent beacon block root
@ -232,10 +246,68 @@ func (api *ConsensusAPI) ForkchoiceUpdatedV3(update engine.ForkchoiceStateV1, pa
// hash, even if params are wrong. To do this we need to split up
// forkchoiceUpdate into a function that only updates the head and then a
// function that kicks off block construction.
return api.forkchoiceUpdated(update, params, engine.PayloadV3)
return api.forkchoiceUpdated(update, params, engine.PayloadV3, false)
}
func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion) (engine.ForkChoiceResponse, error) {
// ForkchoiceUpdatedWithWitnessV1 is analogous to ForkchoiceUpdatedV1, only it
// generates an execution witness too if block building was requested.
func (api *ConsensusAPI) ForkchoiceUpdatedWithWitnessV1(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if payloadAttributes != nil {
if payloadAttributes.Withdrawals != nil || payloadAttributes.BeaconRoot != nil {
return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("withdrawals and beacon root not supported in V1"))
}
if api.eth.BlockChain().Config().IsShanghai(api.eth.BlockChain().Config().LondonBlock, payloadAttributes.Timestamp) {
return engine.STATUS_INVALID, engine.InvalidParams.With(errors.New("forkChoiceUpdateV1 called post-shanghai"))
}
}
return api.forkchoiceUpdated(update, payloadAttributes, engine.PayloadV1, true)
}
// ForkchoiceUpdatedWithWitnessV2 is analogous to ForkchoiceUpdatedV2, only it
// generates an execution witness too if block building was requested.
func (api *ConsensusAPI) ForkchoiceUpdatedWithWitnessV2(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if params != nil {
if params.BeaconRoot != nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("unexpected beacon root"))
}
switch api.eth.BlockChain().Config().LatestFork(params.Timestamp) {
case forks.Paris:
if params.Withdrawals != nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("withdrawals before shanghai"))
}
case forks.Shanghai:
if params.Withdrawals == nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("missing withdrawals"))
}
default:
return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV2 must only be called with paris and shanghai payloads"))
}
}
return api.forkchoiceUpdated(update, params, engine.PayloadV2, true)
}
// ForkchoiceUpdatedWithWitnessV3 is analogous to ForkchoiceUpdatedV3, only it
// generates an execution witness too if block building was requested.
func (api *ConsensusAPI) ForkchoiceUpdatedWithWitnessV3(update engine.ForkchoiceStateV1, params *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) {
if params != nil {
if params.Withdrawals == nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("missing withdrawals"))
}
if params.BeaconRoot == nil {
return engine.STATUS_INVALID, engine.InvalidPayloadAttributes.With(errors.New("missing beacon root"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun && api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.STATUS_INVALID, engine.UnsupportedFork.With(errors.New("forkchoiceUpdatedV3 must only be called for cancun payloads"))
}
}
// TODO(matt): the spec requires that fcu is applied when called on a valid
// hash, even if params are wrong. To do this we need to split up
// forkchoiceUpdate into a function that only updates the head and then a
// function that kicks off block construction.
return api.forkchoiceUpdated(update, params, engine.PayloadV3, true)
}
func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, payloadVersion engine.PayloadVersion, payloadWitness bool) (engine.ForkChoiceResponse, error) {
api.forkchoiceLock.Lock()
defer api.forkchoiceLock.Unlock()
@ -378,7 +450,7 @@ func (api *ConsensusAPI) forkchoiceUpdated(update engine.ForkchoiceStateV1, payl
if api.localBlocks.has(id) {
return valid(&id), nil
}
payload, err := api.eth.Miner().BuildPayload(args)
payload, err := api.eth.Miner().BuildPayload(args, payloadWitness)
if err != nil {
log.Error("Failed to build payload", "err", err)
return valid(nil), engine.InvalidPayloadAttributes.With(err)
@ -469,7 +541,7 @@ func (api *ConsensusAPI) NewPayloadV1(params engine.ExecutableData) (engine.Payl
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
return api.newPayload(params, nil, nil)
return api.newPayload(params, nil, nil, false)
}
// NewPayloadV2 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@ -492,7 +564,7 @@ func (api *ConsensusAPI) NewPayloadV2(params engine.ExecutableData) (engine.Payl
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
return api.newPayload(params, nil, nil)
return api.newPayload(params, nil, nil, false)
}
// NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
@ -517,9 +589,10 @@ func (api *ConsensusAPI) NewPayloadV3(params engine.ExecutableData, versionedHas
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV3 must only be called for cancun payloads"))
}
return api.newPayload(params, versionedHashes, beaconRoot)
return api.newPayload(params, versionedHashes, beaconRoot, false)
}
// NewPayloadV4 creates an Eth1 block, inserts it in the chain, and returns the status of the chain.
func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
@ -545,10 +618,186 @@ func (api *ConsensusAPI) NewPayloadV4(params engine.ExecutableData, versionedHas
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadV4 must only be called for prague payloads"))
}
return api.newPayload(params, versionedHashes, beaconRoot)
return api.newPayload(params, versionedHashes, beaconRoot, false)
}
func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
// NewPayloadWithWitnessV1 is analogous to NewPayloadV1, only it also generates
// and returns a stateless witness after running the payload.
func (api *ConsensusAPI) NewPayloadWithWitnessV1(params engine.ExecutableData) (engine.PayloadStatusV1, error) {
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
return api.newPayload(params, nil, nil, true)
}
// NewPayloadWithWitnessV2 is analogous to NewPayloadV2, only it also generates
// and returns a stateless witness after running the payload.
func (api *ConsensusAPI) NewPayloadWithWitnessV2(params engine.ExecutableData) (engine.PayloadStatusV1, error) {
if api.eth.BlockChain().Config().IsCancun(api.eth.BlockChain().Config().LondonBlock, params.Timestamp) {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("can't use newPayloadV2 post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) == forks.Shanghai {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
} else {
if params.Withdrawals != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai"))
}
}
if params.ExcessBlobGas != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun"))
}
if params.BlobGasUsed != nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
return api.newPayload(params, nil, nil, true)
}
// NewPayloadWithWitnessV3 is analogous to NewPayloadV3, only it also generates
// and returns a stateless witness after running the payload.
func (api *ConsensusAPI) NewPayloadWithWitnessV3(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
if params.ExcessBlobGas == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
}
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
}
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV3 must only be called for cancun payloads"))
}
return api.newPayload(params, versionedHashes, beaconRoot, true)
}
// NewPayloadWithWitnessV4 is analogous to NewPayloadV4, only it also generates
// and returns a stateless witness after running the payload.
func (api *ConsensusAPI) NewPayloadWithWitnessV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
if params.ExcessBlobGas == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
}
if params.BlobGasUsed == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
if params.Deposits == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
}
if versionedHashes == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
}
if beaconRoot == nil {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.PayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("newPayloadWithWitnessV4 must only be called for prague payloads"))
}
return api.newPayload(params, versionedHashes, beaconRoot, true)
}
// ExecuteStatelessPayloadV1 is analogous to NewPayloadV1, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
func (api *ConsensusAPI) ExecuteStatelessPayloadV1(params engine.ExecutableData, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if params.Withdrawals != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("withdrawals not supported in V1"))
}
return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV2 is analogous to NewPayloadV2, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
func (api *ConsensusAPI) ExecuteStatelessPayloadV2(params engine.ExecutableData, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if api.eth.BlockChain().Config().IsCancun(api.eth.BlockChain().Config().LondonBlock, params.Timestamp) {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("can't use newPayloadV2 post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) == forks.Shanghai {
if params.Withdrawals == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
} else {
if params.Withdrawals != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai"))
}
}
if params.ExcessBlobGas != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun"))
}
if params.BlobGasUsed != nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun"))
}
return api.executeStatelessPayload(params, nil, nil, opaqueWitness)
}
// ExecuteStatelessPayloadV3 is analogous to NewPayloadV3, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
func (api *ConsensusAPI) ExecuteStatelessPayloadV3(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
if params.ExcessBlobGas == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
}
if params.BlobGasUsed == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
if versionedHashes == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
}
if beaconRoot == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Cancun {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV3 must only be called for cancun payloads"))
}
return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
}
// ExecuteStatelessPayloadV4 is analogous to NewPayloadV4, only it operates in
// a stateless mode on top of a provided witness instead of the local database.
func (api *ConsensusAPI) ExecuteStatelessPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
if params.Withdrawals == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai"))
}
if params.ExcessBlobGas == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun"))
}
if params.BlobGasUsed == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun"))
}
if params.Deposits == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil deposits post-prague"))
}
if versionedHashes == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun"))
}
if beaconRoot == nil {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun"))
}
if api.eth.BlockChain().Config().LatestFork(params.Timestamp) != forks.Prague {
return engine.StatelessPayloadStatusV1{Status: engine.INVALID}, engine.UnsupportedFork.With(errors.New("executeStatelessPayloadV4 must only be called for prague payloads"))
}
return api.executeStatelessPayload(params, versionedHashes, beaconRoot, opaqueWitness)
}
func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, witness bool) (engine.PayloadStatusV1, error) {
// The locking here is, strictly, not required. Without these locks, this can happen:
//
// 1. NewPayload( execdata-N ) is invoked from the CL. It goes all the way down to
@ -656,8 +905,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
return engine.PayloadStatusV1{Status: engine.ACCEPTED}, nil
}
log.Trace("Inserting block without sethead", "hash", block.Hash(), "number", block.Number())
if err := api.eth.BlockChain().InsertBlockWithoutSetHead(block); err != nil {
log.Warn("NewPayloadV1: inserting block failed", "error", err)
proofs, err := api.eth.BlockChain().InsertBlockWithoutSetHead(block, witness)
if err != nil {
log.Warn("NewPayload: inserting block failed", "error", err)
api.invalidLock.Lock()
api.invalidBlocksHits[block.Hash()] = 1
@ -667,7 +917,71 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
return api.invalid(err, parent.Header()), nil
}
hash := block.Hash()
return engine.PayloadStatusV1{Status: engine.VALID, LatestValidHash: &hash}, nil
// If witness collection was requested, inject that into the result too
var ow *hexutil.Bytes
if proofs != nil {
ow = new(hexutil.Bytes)
*ow, _ = rlp.EncodeToBytes(proofs)
}
return engine.PayloadStatusV1{Status: engine.VALID, Witness: ow, LatestValidHash: &hash}, nil
}
func (api *ConsensusAPI) executeStatelessPayload(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, opaqueWitness hexutil.Bytes) (engine.StatelessPayloadStatusV1, error) {
log.Trace("Engine API request received", "method", "ExecuteStatelessPayload", "number", params.Number, "hash", params.BlockHash)
block, err := engine.ExecutableDataToBlockNoHash(params, versionedHashes, beaconRoot)
if err != nil {
bgu := "nil"
if params.BlobGasUsed != nil {
bgu = strconv.Itoa(int(*params.BlobGasUsed))
}
ebg := "nil"
if params.ExcessBlobGas != nil {
ebg = strconv.Itoa(int(*params.ExcessBlobGas))
}
log.Warn("Invalid ExecuteStatelessPayload params",
"params.Number", params.Number,
"params.ParentHash", params.ParentHash,
"params.BlockHash", params.BlockHash,
"params.StateRoot", params.StateRoot,
"params.FeeRecipient", params.FeeRecipient,
"params.LogsBloom", common.PrettyBytes(params.LogsBloom),
"params.Random", params.Random,
"params.GasLimit", params.GasLimit,
"params.GasUsed", params.GasUsed,
"params.Timestamp", params.Timestamp,
"params.ExtraData", common.PrettyBytes(params.ExtraData),
"params.BaseFeePerGas", params.BaseFeePerGas,
"params.BlobGasUsed", bgu,
"params.ExcessBlobGas", ebg,
"len(params.Transactions)", len(params.Transactions),
"len(params.Withdrawals)", len(params.Withdrawals),
"len(params.Deposits)", len(params.Deposits),
"beaconRoot", beaconRoot,
"error", err)
errorMsg := err.Error()
return engine.StatelessPayloadStatusV1{Status: engine.INVALID, ValidationError: &errorMsg}, nil
}
witness := new(stateless.Witness)
if err := rlp.DecodeBytes(opaqueWitness, witness); err != nil {
log.Warn("Invalid ExecuteStatelessPayload witness", "err", err)
errorMsg := err.Error()
return engine.StatelessPayloadStatusV1{Status: engine.INVALID, ValidationError: &errorMsg}, nil
}
// Stash away the last update to warn the user if the beacon client goes offline
api.lastNewPayloadLock.Lock()
api.lastNewPayloadUpdate = time.Now()
api.lastNewPayloadLock.Unlock()
log.Trace("Executing block statelessly", "number", block.Number(), "hash", params.BlockHash)
stateRoot, receiptRoot, err := core.ExecuteStateless(api.eth.BlockChain().Config(), block, witness)
if err != nil {
log.Warn("ExecuteStatelessPayload: execution failed", "err", err)
errorMsg := err.Error()
return engine.StatelessPayloadStatusV1{Status: engine.INVALID, ValidationError: &errorMsg}, nil
}
return engine.StatelessPayloadStatusV1{Status: engine.VALID, StateRoot: stateRoot, ReceiptsRoot: receiptRoot}, nil
}
// delayPayloadImport stashes the given block away for import at a later time,

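A hedged sketch of driving the new stateless endpoint from a Go JSON-RPC client; the endpoint URL is an assumption and the JWT authentication the engine API requires is elided, while the method name and argument order follow the handlers above.

package enginedemo

import (
	"context"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// executeStateless asks a node to run a payload against the supplied witness
// instead of its local database and returns the resulting status.
func executeStateless(ctx context.Context, url string, payload engine.ExecutableData,
	versionedHashes []common.Hash, beaconRoot common.Hash, witness hexutil.Bytes) (*engine.StatelessPayloadStatusV1, error) {
	client, err := rpc.DialContext(ctx, url) // auth against the engine port omitted here
	if err != nil {
		return nil, err
	}
	defer client.Close()

	var status engine.StatelessPayloadStatusV1
	if err := client.CallContext(ctx, &status, "engine_executeStatelessPayloadV3",
		payload, versionedHashes, beaconRoot, witness); err != nil {
		return nil, err
	}
	return &status, nil
}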
@ -507,7 +507,7 @@ func setupBlocks(t *testing.T, ethservice *eth.Ethereum, n int, parent *types.He
}
payload := getNewPayload(t, api, parent, w, h)
execResp, err := api.newPayload(*payload, []common.Hash{}, h)
execResp, err := api.newPayload(*payload, []common.Hash{}, h, false)
if err != nil {
t.Fatalf("can't execute payload: %v", err)
}
@ -684,7 +684,7 @@ func assembleBlock(api *ConsensusAPI, parentHash common.Hash, params *engine.Pay
Withdrawals: params.Withdrawals,
BeaconRoot: params.BeaconRoot,
}
payload, err := api.eth.Miner().BuildPayload(args)
payload, err := api.eth.Miner().BuildPayload(args, false)
if err != nil {
return nil, err
}
@ -922,7 +922,7 @@ func TestNewPayloadOnInvalidTerminalBlock(t *testing.T) {
Random: crypto.Keccak256Hash([]byte{byte(1)}),
FeeRecipient: parent.Coinbase(),
}
payload, err := api.eth.Miner().BuildPayload(args)
payload, err := api.eth.Miner().BuildPayload(args, false)
if err != nil {
t.Fatalf("error preparing payload, err=%v", err)
}
@ -1704,6 +1704,108 @@ func TestParentBeaconBlockRoot(t *testing.T) {
}
}
func TestWitnessCreationAndConsumption(t *testing.T) {
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(colorable.NewColorableStderr(), log.LevelTrace, true)))
genesis, blocks := generateMergeChain(10, true)
// Set cancun time to the second-to-last block + 5 seconds
timestamp := blocks[len(blocks)-2].Time() + 5
genesis.Config.ShanghaiTime = &timestamp
genesis.Config.CancunTime = &timestamp
n, ethservice := startEthService(t, genesis, blocks[:9])
defer n.Close()
api := NewConsensusAPI(ethservice)
// Put the 10th block's tx in the pool and produce a new block
txs := blocks[9].Transactions()
ethservice.TxPool().Add(txs, true, true)
blockParams := engine.PayloadAttributes{
Timestamp: blocks[8].Time() + 5,
Withdrawals: make([]*types.Withdrawal, 0),
BeaconRoot: &common.Hash{42},
}
fcState := engine.ForkchoiceStateV1{
HeadBlockHash: blocks[8].Hash(),
SafeBlockHash: common.Hash{},
FinalizedBlockHash: common.Hash{},
}
_, err := api.ForkchoiceUpdatedWithWitnessV3(fcState, &blockParams)
if err != nil {
t.Fatalf("error preparing payload, err=%v", err)
}
// Give the payload some time to be built
time.Sleep(100 * time.Millisecond)
payloadID := (&miner.BuildPayloadArgs{
Parent: fcState.HeadBlockHash,
Timestamp: blockParams.Timestamp,
FeeRecipient: blockParams.SuggestedFeeRecipient,
Random: blockParams.Random,
Withdrawals: blockParams.Withdrawals,
BeaconRoot: blockParams.BeaconRoot,
Version: engine.PayloadV3,
}).Id()
envelope, err := api.GetPayloadV3(payloadID)
if err != nil {
t.Fatalf("error getting payload, err=%v", err)
}
if len(envelope.ExecutionPayload.Transactions) != blocks[9].Transactions().Len() {
t.Fatalf("invalid number of transactions %d != %d", len(envelope.ExecutionPayload.Transactions), blocks[9].Transactions().Len())
}
if envelope.Witness == nil {
t.Fatalf("witness missing from payload")
}
// Test stateless execution of the created witness
wantStateRoot := envelope.ExecutionPayload.StateRoot
wantReceiptRoot := envelope.ExecutionPayload.ReceiptsRoot
envelope.ExecutionPayload.StateRoot = common.Hash{}
envelope.ExecutionPayload.ReceiptsRoot = common.Hash{}
res, err := api.ExecuteStatelessPayloadV3(*envelope.ExecutionPayload, []common.Hash{}, &common.Hash{42}, *envelope.Witness)
if err != nil {
t.Fatalf("error executing stateless payload witness: %v", err)
}
if res.StateRoot != wantStateRoot {
t.Fatalf("stateless state root mismatch: have %v, want %v", res.StateRoot, wantStateRoot)
}
if res.ReceiptsRoot != wantReceiptRoot {
t.Fatalf("stateless receipt root mismatch: have %v, want %v", res.ReceiptsRoot, wantReceiptRoot)
}
// Test block insertion with witness creation
envelope.ExecutionPayload.StateRoot = wantStateRoot
envelope.ExecutionPayload.ReceiptsRoot = wantReceiptRoot
res2, err := api.NewPayloadWithWitnessV3(*envelope.ExecutionPayload, []common.Hash{}, &common.Hash{42})
if err != nil {
t.Fatalf("error executing stateless payload witness: %v", err)
}
if res2.Witness == nil {
t.Fatalf("witness missing from payload")
}
// Test stateless execution of the created witness
wantStateRoot = envelope.ExecutionPayload.StateRoot
wantReceiptRoot = envelope.ExecutionPayload.ReceiptsRoot
envelope.ExecutionPayload.StateRoot = common.Hash{}
envelope.ExecutionPayload.ReceiptsRoot = common.Hash{}
res, err = api.ExecuteStatelessPayloadV3(*envelope.ExecutionPayload, []common.Hash{}, &common.Hash{42}, *res2.Witness)
if err != nil {
t.Fatalf("error executing stateless payload witness: %v", err)
}
if res.StateRoot != wantStateRoot {
t.Fatalf("stateless state root mismatch: have %v, want %v", res.StateRoot, wantStateRoot)
}
if res.ReceiptsRoot != wantReceiptRoot {
t.Fatalf("stateless receipt root mismatch: have %v, want %v", res.ReceiptsRoot, wantReceiptRoot)
}
}
// TestGetClientVersion verifies the expected version info is returned.
func TestGetClientVersion(t *testing.T) {
genesis, preMergeBlocks := generateMergeChain(10, false)

@ -181,7 +181,7 @@ func (c *SimulatedBeacon) sealBlock(withdrawals []*types.Withdrawal, timestamp u
Withdrawals: withdrawals,
Random: random,
BeaconRoot: &common.Hash{},
}, engine.PayloadV3)
}, engine.PayloadV3, false)
if err != nil {
return err
}

@ -134,9 +134,6 @@ type Config struct {
// Enables tracking of SHA3 preimages in the VM
EnablePreimageRecording bool
// Enables prefetching trie nodes for read operations too
EnableWitnessCollection bool `toml:"-"`
// Enables VM tracing
VMTrace string
VMTraceJsonConfig string

@ -44,7 +44,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
BlobPool blobpool.Config
GPO gasprice.Config
EnablePreimageRecording bool
EnableWitnessCollection bool `toml:"-"`
VMTrace string
VMTraceJsonConfig string
DocRoot string `toml:"-"`
@ -82,7 +81,6 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.BlobPool = c.BlobPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.EnableWitnessCollection = c.EnableWitnessCollection
enc.VMTrace = c.VMTrace
enc.VMTraceJsonConfig = c.VMTraceJsonConfig
enc.DocRoot = c.DocRoot
@ -124,7 +122,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
BlobPool *blobpool.Config
GPO *gasprice.Config
EnablePreimageRecording *bool
EnableWitnessCollection *bool `toml:"-"`
VMTrace *string
VMTraceJsonConfig *string
DocRoot *string `toml:"-"`
@ -219,9 +216,6 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.EnablePreimageRecording != nil {
c.EnablePreimageRecording = *dec.EnablePreimageRecording
}
if dec.EnableWitnessCollection != nil {
c.EnableWitnessCollection = *dec.EnableWitnessCollection
}
if dec.VMTrace != nil {
c.VMTrace = *dec.VMTrace
}

@ -293,7 +293,9 @@ func (d *Database) Has(key []byte) (bool, error) {
} else if err != nil {
return false, err
}
closer.Close()
if err = closer.Close(); err != nil {
return false, err
}
return true, nil
}
@ -310,7 +312,9 @@ func (d *Database) Get(key []byte) ([]byte, error) {
}
ret := make([]byte, len(dat))
copy(ret, dat)
closer.Close()
if err = closer.Close(); err != nil {
return nil, err
}
return ret, nil
}
@ -519,14 +523,18 @@ type batch struct {
// Put inserts the given value into the batch for later committing.
func (b *batch) Put(key, value []byte) error {
b.b.Set(key, value, nil)
if err := b.b.Set(key, value, nil); err != nil {
return err
}
b.size += len(key) + len(value)
return nil
}
// Delete inserts the key removal into the batch for later committing.
func (b *batch) Delete(key []byte) error {
b.b.Delete(key, nil)
if err := b.b.Delete(key, nil); err != nil {
return err
}
b.size += len(key)
return nil
}
@ -558,19 +566,22 @@ func (b *batch) Replay(w ethdb.KeyValueWriter) error {
for {
kind, k, v, ok, err := reader.Next()
if !ok || err != nil {
break
return err
}
// The (k,v) slices might be overwritten if the batch is reset/reused,
// and the receiver should copy them if they are to be retained long-term.
if kind == pebble.InternalKeyKindSet {
w.Put(k, v)
if err = w.Put(k, v); err != nil {
return err
}
} else if kind == pebble.InternalKeyKindDelete {
w.Delete(k)
if err = w.Delete(k); err != nil {
return err
}
} else {
return fmt.Errorf("unhandled operation, keytype: %v", kind)
}
}
return nil
}
// pebbleIterator is a wrapper of underlying iterator in storage engine.

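The batch fixes above tighten the ethdb contract so backend errors from Put, Delete and Replay surface to the caller. A small runnable sketch against the in-memory database shows the call sites where those errors now appear.

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()
	defer db.Close()

	batch := db.NewBatch()
	if err := batch.Put([]byte("key"), []byte("value")); err != nil {
		log.Fatal(err) // with the pebble change, a failing Set is reported here
	}
	if err := batch.Write(); err != nil {
		log.Fatal(err)
	}
	// Replay the same batch into a second writer; Put/Delete errors now propagate.
	other := rawdb.NewMemoryDatabase()
	defer other.Close()
	if err := batch.Replay(other); err != nil {
		log.Fatal(err)
	}
	val, _ := other.Get([]byte("key"))
	fmt.Printf("replayed value: %s\n", val)
}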
@ -61,7 +61,7 @@ require (
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.2.0
github.com/stretchr/testify v1.9.0
github.com/supranational/blst v0.3.11
github.com/supranational/blst v0.3.13
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7
github.com/tyler-smith/go-bip39 v1.1.0
github.com/urfave/cli/v2 v2.25.7

@ -495,8 +495,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.13 h1:AYeSxdOMacwu7FBmpfloBz5pbFXDmJL33RuwnKtmTjk=
github.com/supranational/blst v0.3.13/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=

@ -1181,7 +1181,13 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S
// Make sure the context is cancelled when the call has completed
// this makes sure resources are cleaned up.
defer cancel()
return applyMessage(ctx, b, args, state, header, timeout, new(core.GasPool).AddGas(globalGasCap), &blockCtx, &vm.Config{NoBaseFee: true}, precompiles, true)
gp := new(core.GasPool)
if globalGasCap == 0 {
gp.AddGas(math.MaxUint64)
} else {
gp.AddGas(globalGasCap)
}
return applyMessage(ctx, b, args, state, header, timeout, gp, &blockCtx, &vm.Config{NoBaseFee: true}, precompiles, true)
}
func applyMessage(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, timeout time.Duration, gp *core.GasPool, blockContext *vm.BlockContext, vmConfig *vm.Config, precompiles vm.PrecompiledContracts, skipChecks bool) (*core.ExecutionResult, error) {
@ -1283,13 +1289,17 @@ func (api *BlockChainAPI) SimulateV1(ctx context.Context, opts simOpts, blockNrO
if state == nil || err != nil {
return nil, err
}
gasCap := api.b.RPCGasCap()
if gasCap == 0 {
gasCap = math.MaxUint64
}
sim := &simulator{
b: api.b,
state: state,
base: base,
chainConfig: api.b.ChainConfig(),
// Each tx, and the series of txs as a whole, shouldn't consume more gas than the cap
gp: new(core.GasPool).AddGas(api.b.RPCGasCap()),
gp: new(core.GasPool).AddGas(gasCap),
traceTransfers: opts.TraceTransfers,
validate: opts.Validation,
fullTx: opts.ReturnFullTransactions,

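The change above treats an RPC gas cap of zero as "unlimited" rather than an empty gas pool. A minimal sketch of the same fallback, with the newGasPool helper being illustrative:

package main

import (
	"fmt"
	"math"

	"github.com/ethereum/go-ethereum/core"
)

// newGasPool fills the pool with the configured cap, or with MaxUint64 when
// the cap is zero, i.e. when gas usage is unrestricted.
func newGasPool(globalGasCap uint64) *core.GasPool {
	gp := new(core.GasPool)
	if globalGasCap == 0 {
		gp.AddGas(math.MaxUint64)
	} else {
		gp.AddGas(globalGasCap)
	}
	return gp
}

func main() {
	fmt.Println("unlimited:", newGasPool(0).Gas())
	fmt.Println("capped:", newGasPool(30_000_000).Gas())
}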
@ -126,8 +126,8 @@ func (miner *Miner) SetGasTip(tip *big.Int) error {
}
// BuildPayload builds the payload according to the provided parameters.
func (miner *Miner) BuildPayload(args *BuildPayloadArgs) (*Payload, error) {
return miner.buildPayload(args)
func (miner *Miner) BuildPayload(args *BuildPayloadArgs, witness bool) (*Payload, error) {
return miner.buildPayload(args, witness)
}
// getPending retrieves the pending block based on the current head block.
@ -156,7 +156,7 @@ func (miner *Miner) getPending() *newPayloadResult {
withdrawals: withdrawal,
beaconRoot: nil,
noTxs: false,
})
}, false) // we will never make a witness for a pending block
if ret.err != nil {
return nil
}

@ -25,6 +25,8 @@ import (
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
@ -67,22 +69,25 @@ func (args *BuildPayloadArgs) Id() engine.PayloadID {
// the revenue. Therefore, the empty-block here is always available and full-block
// will be set/updated afterwards.
type Payload struct {
id engine.PayloadID
empty *types.Block
full *types.Block
sidecars []*types.BlobTxSidecar
fullFees *big.Int
stop chan struct{}
lock sync.Mutex
cond *sync.Cond
id engine.PayloadID
empty *types.Block
emptyWitness *stateless.Witness
full *types.Block
fullWitness *stateless.Witness
sidecars []*types.BlobTxSidecar
fullFees *big.Int
stop chan struct{}
lock sync.Mutex
cond *sync.Cond
}
// newPayload initializes the payload object.
func newPayload(empty *types.Block, id engine.PayloadID) *Payload {
func newPayload(empty *types.Block, witness *stateless.Witness, id engine.PayloadID) *Payload {
payload := &Payload{
id: id,
empty: empty,
stop: make(chan struct{}),
id: id,
empty: empty,
emptyWitness: witness,
stop: make(chan struct{}),
}
log.Info("Starting work on payload", "id", payload.id)
payload.cond = sync.NewCond(&payload.lock)
@ -106,6 +111,7 @@ func (payload *Payload) update(r *newPayloadResult, elapsed time.Duration) {
payload.full = r.block
payload.fullFees = r.fees
payload.sidecars = r.sidecars
payload.fullWitness = r.witness
feesInEther := new(big.Float).Quo(new(big.Float).SetInt(r.fees), big.NewFloat(params.Ether))
log.Info("Updated payload",
@ -135,9 +141,19 @@ func (payload *Payload) Resolve() *engine.ExecutionPayloadEnvelope {
close(payload.stop)
}
if payload.full != nil {
return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
}
return envelope
}
envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
}
return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
return envelope
}
// ResolveEmpty is basically identical to Resolve, but it expects empty block only.
@ -146,7 +162,12 @@ func (payload *Payload) ResolveEmpty() *engine.ExecutionPayloadEnvelope {
payload.lock.Lock()
defer payload.lock.Unlock()
return engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
envelope := engine.BlockToExecutableData(payload.empty, big.NewInt(0), nil)
if payload.emptyWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.emptyWitness) // cannot fail
}
return envelope
}
// ResolveFull is basically identical to Resolve, but it expects full block only.
@ -172,11 +193,16 @@ func (payload *Payload) ResolveFull() *engine.ExecutionPayloadEnvelope {
default:
close(payload.stop)
}
return engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
envelope := engine.BlockToExecutableData(payload.full, payload.fullFees, payload.sidecars)
if payload.fullWitness != nil {
envelope.Witness = new(hexutil.Bytes)
*envelope.Witness, _ = rlp.EncodeToBytes(payload.fullWitness) // cannot fail
}
return envelope
}
// buildPayload builds the payload according to the provided parameters.
func (miner *Miner) buildPayload(args *BuildPayloadArgs) (*Payload, error) {
func (miner *Miner) buildPayload(args *BuildPayloadArgs, witness bool) (*Payload, error) {
// Build the initial version with no transaction included. It should be fast
// enough to run. The empty payload can at least make sure there is something
// to deliver so the slot is not missed.
@ -190,13 +216,12 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs) (*Payload, error) {
beaconRoot: args.BeaconRoot,
noTxs: true,
}
empty := miner.generateWork(emptyParams)
empty := miner.generateWork(emptyParams, witness)
if empty.err != nil {
return nil, empty.err
}
// Construct a payload object for return.
payload := newPayload(empty.block, args.Id())
payload := newPayload(empty.block, empty.witness, args.Id())
// Spin up a routine for updating the payload in background. This strategy
// can maximize the revenue by including the transactions with the highest fees.
@ -226,7 +251,7 @@ func (miner *Miner) buildPayload(args *BuildPayloadArgs) (*Payload, error) {
select {
case <-timer.C:
start := time.Now()
r := miner.generateWork(fullParams)
r := miner.generateWork(fullParams, witness)
if r.err == nil {
payload.update(r, time.Since(start))
} else {

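Consumers of the resolved envelope receive the witness as an opaque RLP blob. A hedged sketch of decoding it back into a stateless.Witness, with the decodeWitness helper being illustrative:

package payloadsketch

import (
	"errors"

	"github.com/ethereum/go-ethereum/beacon/engine"
	"github.com/ethereum/go-ethereum/core/stateless"
	"github.com/ethereum/go-ethereum/rlp"
)

// decodeWitness unpacks the RLP-encoded witness attached to an execution
// payload envelope, if one was requested when building the payload.
func decodeWitness(envelope *engine.ExecutionPayloadEnvelope) (*stateless.Witness, error) {
	if envelope.Witness == nil {
		return nil, errors.New("no witness attached to payload")
	}
	witness := new(stateless.Witness)
	if err := rlp.DecodeBytes(*envelope.Witness, witness); err != nil {
		return nil, err
	}
	return witness, nil
}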
@ -160,7 +160,7 @@ func TestBuildPayload(t *testing.T) {
Random: common.Hash{},
FeeRecipient: recipient,
}
payload, err := w.buildPayload(args)
payload, err := w.buildPayload(args, false)
if err != nil {
t.Fatalf("Failed to build payload %v", err)
}

@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/stateless"
"github.com/ethereum/go-ethereum/core/txpool"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
@ -56,6 +57,8 @@ type environment struct {
receipts []*types.Receipt
sidecars []*types.BlobTxSidecar
blobs int
witness *stateless.Witness
}
const (
@ -73,6 +76,7 @@ type newPayloadResult struct {
sidecars []*types.BlobTxSidecar // collected blobs of blob transactions
stateDB *state.StateDB // StateDB after executing the transactions
receipts []*types.Receipt // Receipts collected during construction
witness *stateless.Witness // Witness is an optional stateless proof
}
// generateParams wraps various settings for generating sealing task.
@ -88,8 +92,8 @@ type generateParams struct {
}
// generateWork generates a sealing block based on the given parameters.
func (miner *Miner) generateWork(params *generateParams) *newPayloadResult {
work, err := miner.prepareWork(params)
func (miner *Miner) generateWork(params *generateParams, witness bool) *newPayloadResult {
work, err := miner.prepareWork(params, witness)
if err != nil {
return &newPayloadResult{err: err}
}
@ -129,13 +133,14 @@ func (miner *Miner) generateWork(params *generateParams) *newPayloadResult {
sidecars: work.sidecars,
stateDB: work.state,
receipts: work.receipts,
witness: work.witness,
}
}
// prepareWork constructs the sealing task according to the given parameters,
// either based on the last chain head or specified parent. In this function
// the pending transactions are not filled yet; only the empty task is returned.
func (miner *Miner) prepareWork(genParams *generateParams) (*environment, error) {
func (miner *Miner) prepareWork(genParams *generateParams, witness bool) (*environment, error) {
miner.confMu.RLock()
defer miner.confMu.RUnlock()
@ -203,7 +208,7 @@ func (miner *Miner) prepareWork(genParams *generateParams) (*environment, error)
// Could potentially happen if starting to mine in an odd state.
// Note genParams.coinbase can be different from header.Coinbase
// since clique algorithm can modify the coinbase field in header.
env, err := miner.makeEnv(parent, header, genParams.coinbase)
env, err := miner.makeEnv(parent, header, genParams.coinbase, witness)
if err != nil {
log.Error("Failed to create sealing context", "err", err)
return nil, err
@ -222,18 +227,26 @@ func (miner *Miner) prepareWork(genParams *generateParams) (*environment, error)
}
// makeEnv creates a new environment for the sealing block.
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) {
func (miner *Miner) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, witness bool) (*environment, error) {
// Retrieve the parent state to execute on top.
state, err := miner.chain.StateAt(parent.Root)
if err != nil {
return nil, err
}
if witness {
bundle, err := stateless.NewWitness(header, miner.chain)
if err != nil {
return nil, err
}
state.StartPrefetcher("miner", bundle)
}
// Note the passed coinbase may be different from header.Coinbase.
return &environment{
signer: types.MakeSigner(miner.chainConfig, header.Number, header.Time),
state: state,
coinbase: coinbase,
header: header,
witness: state.Witness(),
}, nil
}

@ -21,6 +21,7 @@ import (
"fmt"
"net/netip"
"slices"
"sync"
"testing"
"github.com/ethereum/go-ethereum/crypto"
@ -67,7 +68,11 @@ func TestUDPv4_Lookup(t *testing.T) {
func TestUDPv4_LookupIterator(t *testing.T) {
t.Parallel()
test := newUDPTest(t)
defer test.close()
var wg sync.WaitGroup
defer func() {
test.close()
wg.Wait()
}()
// Seed table with initial nodes.
bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256]))
@ -75,7 +80,11 @@ func TestUDPv4_LookupIterator(t *testing.T) {
bootnodes[i] = lookupTestnet.node(256, i)
}
fillTable(test.table, bootnodes, true)
go serveTestnet(test, lookupTestnet)
wg.Add(1)
go func() {
serveTestnet(test, lookupTestnet)
wg.Done()
}()
// Create the iterator and collect the nodes it yields.
iter := test.udp.RandomNodes()
@ -102,7 +111,11 @@ func TestUDPv4_LookupIterator(t *testing.T) {
func TestUDPv4_LookupIteratorClose(t *testing.T) {
t.Parallel()
test := newUDPTest(t)
defer test.close()
var wg sync.WaitGroup
defer func() {
test.close()
wg.Wait()
}()
// Seed table with initial nodes.
bootnodes := make([]*enode.Node, len(lookupTestnet.dists[256]))
@ -110,7 +123,12 @@ func TestUDPv4_LookupIteratorClose(t *testing.T) {
bootnodes[i] = lookupTestnet.node(256, i)
}
fillTable(test.table, bootnodes, true)
go serveTestnet(test, lookupTestnet)
wg.Add(1)
go func() {
serveTestnet(test, lookupTestnet)
wg.Done()
}()
it := test.udp.RandomNodes()
if ok := it.Next(); !ok || it.Node() == nil {

@ -496,6 +496,10 @@ func nextNode(it iterator.Iterator) *Node {
// Close flushes and closes the database files.
func (db *DB) Close() {
close(db.quit)
select {
case <-db.quit: // already closed
default:
close(db.quit)
}
db.lvl.Close()
}

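The Close fix above makes shutting down the node database idempotent by only closing the quit channel if it is still open. A standalone illustration of the pattern; note it guards against repeated sequential calls, not fully concurrent ones, which would still need a mutex or sync.Once.

package main

import "fmt"

type service struct {
	quit chan struct{}
}

// Close is safe to call more than once: if quit is already closed, the select
// takes the first case and the channel is not closed a second time.
func (s *service) Close() {
	select {
	case <-s.quit: // already closed
	default:
		close(s.quit)
	}
}

func main() {
	s := &service{quit: make(chan struct{})}
	s.Close()
	s.Close() // no panic on the second call
	fmt.Println("closed twice safely")
}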
@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 14 // Minor version component of the current release
VersionPatch = 9 // Patch version component of the current release
VersionPatch = 10 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)

@ -152,7 +152,7 @@ func (t *BlockTest) Run(snapshotter bool, scheme string, witness bool, tracer *t
}
chain, err := core.NewBlockChain(db, cache, gspec, nil, engine, vm.Config{
Tracer: tracer,
EnableWitnessCollection: witness,
StatelessSelfValidation: witness,
}, nil)
if err != nil {
return err
