Merge branch 'master' of github.com:ethereum/go-ethereum into http-bind-proto

pull/29330/head
commit 8a17abc1f4
Changed files (total lines changed in parentheses):

1. accounts/abi/argument.go (2)
2. accounts/external/backend.go (15)
3. accounts/keystore/account_cache.go (2)
4. accounts/keystore/account_cache_test.go (4)
5. accounts/keystore/keystore_test.go (6)
6. beacon/blsync/config.go (33)
7. beacon/engine/types.go (10)
8. beacon/light/api/light_api.go (108)
9. beacon/light/head_tracker.go (4)
10. beacon/light/request/server.go (4)
11. beacon/light/sync/head_sync.go (4)
12. beacon/types/config.go (26)
13. beacon/types/exec_header.go (2)
14. build/checksums.txt (30)
15. build/update-license.go (3)
16. cmd/clef/rules.md (26)
17. cmd/devp2p/dns_route53.go (2)
18. cmd/devp2p/internal/ethtest/chain.go (2)
19. cmd/devp2p/internal/ethtest/transaction.go (2)
20. cmd/devp2p/internal/v5test/discv5tests.go (3)
21. cmd/devp2p/internal/v5test/framework.go (9)
22. cmd/devp2p/nodeset.go (2)
23. cmd/ethkey/README.md (2)
24. cmd/evm/internal/t8ntool/flags.go (4)
25. cmd/evm/internal/t8ntool/transition.go (10)
26. cmd/evm/main.go (1)
27. cmd/evm/runner.go (13)
28. cmd/evm/t8n_test.go (8)
29. cmd/evm/testdata/32/README.md (1)
30. cmd/evm/testdata/32/alloc.json (30)
31. cmd/evm/testdata/32/env.json (12)
32. cmd/evm/testdata/32/trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl (61)
33. cmd/evm/testdata/32/txs.json (17)
34. cmd/geth/attach_test.go (2)
35. cmd/utils/export_test.go (2)
36. common/lru/basiclru.go (6)
37. common/prque/lazyqueue.go (10)
38. common/prque/prque.go (21)
39. common/prque/sstack.go (20)
40. consensus/clique/snapshot.go (2)
41. consensus/clique/snapshot_test.go (2)
42. consensus/ethash/consensus.go (2)
43. core/asm/lexer.go (2)
44. core/block_validator.go (2)
45. core/blockchain.go (45)
46. core/blockchain_reader.go (3)
47. core/blockchain_sethead_test.go (4)
48. core/bloombits/matcher.go (6)
49. core/chain_makers.go (109)
50. core/evm.go (2)
51. core/forkid/forkid.go (2)
52. core/genesis.go (2)
53. core/mkalloc.go (2)
54. core/rawdb/accessors_chain.go (4)
55. core/rawdb/database.go (3)
56. core/rawdb/freezer_test.go (13)
57. core/state/access_list.go (12)
58. core/state/database.go (2)
59. core/state/snapshot/difflayer.go (2)
60. core/state/snapshot/iterator_fast.go (2)
61. core/state/state_object.go (36)
62. core/state/statedb.go (45)
63. core/state_processor_test.go (105)
64. core/tracing/CHANGELOG.md (69)
65. core/txpool/blobpool/blobpool.go (2)
66. core/txpool/blobpool/limbo.go (2)
67. core/txpool/legacypool/list.go (2)
68. core/types/block.go (16)
69. core/types/transaction.go (34)
70. core/types/transaction_signing.go (5)
71. core/types/transaction_test.go (6)
72. core/types/tx_blob.go (6)
73. core/vm/contracts.go (22)
74. core/vm/errors.go (1)
75. core/vm/evm.go (12)
76. core/vm/gas_table.go (11)
77. core/vm/gas_table_test.go (3)
78. core/vm/interface.go (1)
79. core/vm/interpreter.go (9)
80. crypto/signature_nocgo.go (2)
81. crypto/signify/signify_fuzz.go (1)
82. eth/api_debug_test.go (2)
83. eth/backend.go (6)
84. eth/catalyst/simulated_beacon.go (2)
85. eth/catalyst/simulated_beacon_test.go (4)
86. eth/downloader/beaconsync.go (6)
87. eth/downloader/downloader.go (12)
88. eth/downloader/downloader_test.go (2)
89. eth/downloader/skeleton_test.go (2)
90. eth/fetcher/tx_fetcher_test.go (25)
91. eth/filters/filter.go (15)
92. eth/filters/filter_system.go (4)
93. eth/filters/filter_system_test.go (2)
94. eth/gasprice/feehistory.go (2)
95. eth/gasprice/gasprice.go (2)
96. eth/handler.go (6)
97. eth/handler_eth_test.go (2)
98. eth/protocols/eth/handler_test.go (2)
99. eth/protocols/snap/metrics.go (5)
100. eth/protocols/snap/progress_test.go (154)
Some files were not shown because too many files have changed in this diff.

@@ -127,7 +127,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
	return arguments.copyAtomic(v, values[0])
}

-// unpackAtomic unpacks ( hexdata -> go ) a single value
+// copyAtomic copies ( hexdata -> go ) a single value
func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
	dst := reflect.ValueOf(v).Elem()
	src := reflect.ValueOf(marshalledValues)

@@ -205,7 +205,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
		to = &t
	}
	args := &apitypes.SendTxArgs{
-		Data:  &data,
+		Input: &data,
		Nonce: hexutil.Uint64(tx.Nonce()),
		Value: hexutil.Big(*tx.Value()),
		Gas:   hexutil.Uint64(tx.Gas()),
@@ -215,7 +215,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
	switch tx.Type() {
	case types.LegacyTxType, types.AccessListTxType:
		args.GasPrice = (*hexutil.Big)(tx.GasPrice())
-	case types.DynamicFeeTxType:
+	case types.DynamicFeeTxType, types.BlobTxType:
		args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
		args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
	default:
@@ -235,6 +235,17 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
		accessList := tx.AccessList()
		args.AccessList = &accessList
	}
+	if tx.Type() == types.BlobTxType {
+		args.BlobHashes = tx.BlobHashes()
+		sidecar := tx.BlobTxSidecar()
+		if sidecar == nil {
+			return nil, errors.New("blobs must be present for signing")
+		}
+		args.Blobs = sidecar.Blobs
+		args.Commitments = sidecar.Commitments
+		args.Proofs = sidecar.Proofs
+	}
+
	var res signTransactionResult
	if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
		return nil, err
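
For context: the new `BlobTxType` branch refuses to sign a blob transaction whose sidecar is missing. Below is a minimal sketch of building a transaction this check would accept, assuming go-ethereum's `types.BlobTx`/`types.BlobTxSidecar` API; the blob, commitment, and proof are zero-valued placeholders rather than a valid KZG tuple.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
	"github.com/holiman/uint256"
)

func main() {
	// The sidecar carries the blob payload; SignTx above errors if it is nil.
	sidecar := &types.BlobTxSidecar{
		Blobs:       []kzg4844.Blob{{}},       // placeholder blob, not real data
		Commitments: []kzg4844.Commitment{{}}, // placeholder commitment
		Proofs:      []kzg4844.Proof{{}},      // placeholder proof
	}
	tx := types.NewTx(&types.BlobTx{
		ChainID:    uint256.NewInt(1),
		Nonce:      0,
		GasTipCap:  uint256.NewInt(1),
		GasFeeCap:  uint256.NewInt(10),
		Gas:        21000,
		To:         common.Address{0x01},
		Value:      uint256.NewInt(0),
		BlobFeeCap: uint256.NewInt(1),
		BlobHashes: sidecar.BlobHashes(), // versioned hashes derived from the commitments
		Sidecar:    sidecar,
	})
	fmt.Println(tx.Type() == types.BlobTxType, tx.BlobTxSidecar() != nil) // true true
}
```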

@@ -22,6 +22,7 @@ import (
	"fmt"
	"os"
	"path/filepath"
+	"slices"
	"sort"
	"strings"
	"sync"
@@ -31,7 +32,6 @@ import (
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
-	"golang.org/x/exp/slices"
)

// Minimum amount of time between cache reloads. This limit applies if the platform does

@@ -23,6 +23,7 @@ import (
	"os"
	"path/filepath"
	"reflect"
+	"slices"
	"testing"
	"time"
@@ -30,7 +31,6 @@ import (
	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
-	"golang.org/x/exp/slices"
)

var (
@@ -51,7 +51,7 @@ var (
	}
)

-// waitWatcherStarts waits up to 1s for the keystore watcher to start.
+// waitWatcherStart waits up to 1s for the keystore watcher to start.
func waitWatcherStart(ks *KeyStore) bool {
	// On systems where file watch is not supported, just return "ok".
	if !ks.cache.watcher.enabled() {

@@ -20,6 +20,7 @@ import (
	"math/rand"
	"os"
	"runtime"
+	"slices"
	"strings"
	"sync"
	"sync/atomic"
@@ -30,7 +31,6 @@ import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/event"
-	"golang.org/x/exp/slices"
)

var testSigData = make([]byte, 32)
@@ -343,7 +343,7 @@ func TestWalletNotifications(t *testing.T) {
	checkEvents(t, wantEvents, events)
}

-// TestImportExport tests the import functionality of a keystore.
+// TestImportECDSA tests the import functionality of a keystore.
func TestImportECDSA(t *testing.T) {
	t.Parallel()
	_, ks := tmpKeyStore(t)
@@ -362,7 +362,7 @@ func TestImportECDSA(t *testing.T) {
	}
}

-// TestImportECDSA tests the import and export functionality of a keystore.
+// TestImportExport tests the import and export functionality of a keystore.
func TestImportExport(t *testing.T) {
	t.Parallel()
	_, ks := tmpKeyStore(t)

@@ -72,9 +72,9 @@ var (
)

func makeChainConfig(ctx *cli.Context) lightClientConfig {
-	utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag)
-	customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name) || ctx.IsSet(utils.BeaconGenesisRootFlag.Name) || ctx.IsSet(utils.BeaconGenesisTimeFlag.Name)
	var config lightClientConfig
+	customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
+	utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
	switch {
	case ctx.Bool(utils.MainnetFlag.Name):
		config = MainnetConfig
@@ -87,24 +87,37 @@ func makeChainConfig(ctx *cli.Context) lightClientConfig {
			config = MainnetConfig
		}
	}
-	if customConfig && config.Forks != nil {
-		utils.Fatalf("Cannot use custom beacon chain config flags in combination with pre-defined network config")
-	}
-	if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+	// Genesis root and time should always be specified together with custom chain config
+	if customConfig {
+		if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+			utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
+		}
+		if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+			utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
+		}
+		if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
+			utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
+		}
+		config.ChainConfig = &types.ChainConfig{
+			GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
+		}
		if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.GenesisValidatorsRoot[:len(c)], c)
		} else {
			utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
		}
-	}
-	if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
-		config.GenesisTime = ctx.Uint64(utils.BeaconGenesisTimeFlag.Name)
-	}
-	if ctx.IsSet(utils.BeaconConfigFlag.Name) {
		if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
			utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
		}
+	} else {
+		if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+			utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
+		}
+		if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+			utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
+		}
	}
+	// Checkpoint is required with custom chain config and is optional with pre-defined config
	if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
		if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
			copy(config.Checkpoint[:len(c)], c)

@@ -19,6 +19,7 @@ package engine
import (
	"fmt"
	"math/big"
+	"slices"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
@@ -132,12 +133,7 @@ func (b PayloadID) Version() PayloadVersion {

// Is returns whether the identifier matches any of provided payload versions.
func (b PayloadID) Is(versions ...PayloadVersion) bool {
-	for _, v := range versions {
-		if v == b.Version() {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(versions, b.Version())
}

func (b PayloadID) String() string {
@@ -313,7 +309,7 @@ const (
// ClientVersionV1 contains information which identifies a client implementation.
type ClientVersionV1 struct {
	Code    string `json:"code"`
-	Name    string `json:"clientName"`
+	Name    string `json:"name"`
	Version string `json:"version"`
	Commit  string `json:"commit"`
}
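
The `PayloadID.Is` rewrite above is part of a wider move in this merge from hand-rolled loops (and `golang.org/x/exp/slices`) to the Go 1.21 standard library. A minimal, self-contained sketch of the same one-for-one replacement, with illustrative values only:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Stand-ins for PayloadVersion values.
	versions := []uint8{1, 2, 3}

	// slices.Contains does exactly what the removed for-range loop did:
	// report whether the value occurs anywhere in the slice.
	fmt.Println(slices.Contains(versions, uint8(2))) // true
	fmt.Println(slices.Contains(versions, uint8(9))) // false
}
```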

@@ -17,11 +17,13 @@
package api

import (
+	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
+	"sync"
	"time"

	"github.com/donovanhide/eventsource"
@@ -287,7 +289,7 @@ func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
	}, nil
}

-// GetHead fetches and validates the beacon header with the given blockRoot.
+// GetHeader fetches and validates the beacon header with the given blockRoot.
// If blockRoot is null hash then the latest head header is fetched.
func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) {
	var blockId string
@@ -416,39 +418,34 @@ type HeadEventListener struct {
// The callbacks are also called for the current head and optimistic head at startup.
// They are never called concurrently.
func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func() {
-	closeCh := make(chan struct{})   // initiate closing the stream
-	closedCh := make(chan struct{})  // stream closed (or failed to create)
-	stoppedCh := make(chan struct{}) // sync loop stopped
-	streamCh := make(chan *eventsource.Stream, 1)
+	var (
+		ctx, closeCtx = context.WithCancel(context.Background())
+		streamCh      = make(chan *eventsource.Stream, 1)
+		wg            sync.WaitGroup
+	)
+
+	// When connected to a Lodestar node the subscription blocks until the first actual
+	// event arrives; therefore we create the subscription in a separate goroutine while
+	// letting the main goroutine sync up to the current head.
+	wg.Add(1)
	go func() {
-		defer close(closedCh)
-		// when connected to a Lodestar node the subscription blocks until the
-		// first actual event arrives; therefore we create the subscription in
-		// a separate goroutine while letting the main goroutine sync up to the
-		// current head
-		req, err := http.NewRequest("GET", api.url+
-			"/eth/v1/events?topics=head&topics=light_client_optimistic_update&topics=light_client_finality_update", nil)
-		if err != nil {
-			listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
-			return
-		}
-		for k, v := range api.customHeaders {
-			req.Header.Set(k, v)
-		}
-		stream, err := eventsource.SubscribeWithRequest("", req)
-		if err != nil {
-			listener.OnError(fmt.Errorf("error creating event subscription: %v", err))
-			close(streamCh)
+		defer wg.Done()
+
+		stream := api.startEventStream(ctx, &listener)
+		if stream == nil {
+			// This case happens when the context was closed.
			return
		}
+		// Stream was opened, wait for close signal.
		streamCh <- stream
-		<-closeCh
+		<-ctx.Done()
		stream.Close()
	}()

+	wg.Add(1)
	go func() {
-		defer close(stoppedCh)
+		defer wg.Done()
+
+		// Request initial data.
		if head, err := api.GetHeader(common.Hash{}); err == nil {
			listener.OnNewHead(head.Slot, head.Hash())
		}
@@ -458,32 +455,42 @@ func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func()
		if finalityUpdate, err := api.GetFinalityUpdate(); err == nil {
			listener.OnFinality(finalityUpdate)
		}
-		stream := <-streamCh
-		if stream == nil {
+
+		// Receive the stream.
+		var stream *eventsource.Stream
+		select {
+		case stream = <-streamCh:
+		case <-ctx.Done():
			return
		}

		for {
			select {
+			case <-ctx.Done():
+				stream.Close()
+
			case event, ok := <-stream.Events:
				if !ok {
					return
				}
				switch event.Event() {
				case "head":
-					if slot, blockRoot, err := decodeHeadEvent([]byte(event.Data())); err == nil {
+					slot, blockRoot, err := decodeHeadEvent([]byte(event.Data()))
+					if err == nil {
						listener.OnNewHead(slot, blockRoot)
					} else {
						listener.OnError(fmt.Errorf("error decoding head event: %v", err))
					}
				case "light_client_optimistic_update":
-					if signedHead, err := decodeOptimisticHeadUpdate([]byte(event.Data())); err == nil {
+					signedHead, err := decodeOptimisticHeadUpdate([]byte(event.Data()))
+					if err == nil {
						listener.OnSignedHead(signedHead)
					} else {
						listener.OnError(fmt.Errorf("error decoding optimistic update event: %v", err))
					}
				case "light_client_finality_update":
-					if finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data())); err == nil {
+					finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data()))
+					if err == nil {
						listener.OnFinality(finalityUpdate)
					} else {
						listener.OnError(fmt.Errorf("error decoding finality update event: %v", err))
@@ -491,6 +498,7 @@ func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func()
				default:
					listener.OnError(fmt.Errorf("unexpected event: %s", event.Event()))
				}
+
			case err, ok := <-stream.Errors:
				if !ok {
					return
@@ -499,9 +507,43 @@ func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func()
			}
		}
	}()

	return func() {
-		close(closeCh)
-		<-closedCh
-		<-stoppedCh
+		closeCtx()
+		wg.Wait()
	}
}

+// startEventStream establishes an event stream. This will keep retrying until the stream has been
+// established. It can only return nil when the context is canceled.
+func (api *BeaconLightApi) startEventStream(ctx context.Context, listener *HeadEventListener) *eventsource.Stream {
+	for retry := true; retry; retry = ctxSleep(ctx, 5*time.Second) {
+		path := "/eth/v1/events?topics=head&topics=light_client_optimistic_update&topics=light_client_finality_update"
+		req, err := http.NewRequestWithContext(ctx, "GET", api.url+path, nil)
+		if err != nil {
+			listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
+			continue
+		}
+		for k, v := range api.customHeaders {
+			req.Header.Set(k, v)
+		}
+		stream, err := eventsource.SubscribeWithRequest("", req)
+		if err != nil {
+			listener.OnError(fmt.Errorf("error creating event subscription: %v", err))
+			continue
+		}
+		return stream
+	}
+	return nil
+}
+
+func ctxSleep(ctx context.Context, timeout time.Duration) (ok bool) {
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()
+	select {
+	case <-timer.C:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
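
The rewritten listener relies on `startEventStream` retrying every five seconds until the context is canceled, with `ctxSleep` serving as a cancellable backoff. Below is a runnable sketch of the same retry-with-context pattern; `dial` is a hypothetical stand-in for `eventsource.SubscribeWithRequest`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// dial fails a couple of times before succeeding, standing in for a
// flaky subscription call.
func dial(attempt int) (string, error) {
	if attempt < 3 {
		return "", errors.New("connection refused")
	}
	return "stream", nil
}

// ctxSleep waits for the timeout and returns true, or returns false
// early if the context is canceled first.
func ctxSleep(ctx context.Context, timeout time.Duration) bool {
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-timer.C:
		return true
	case <-ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// Same loop shape as startEventStream: retry until success or cancellation.
	attempt := 0
	for retry := true; retry; retry = ctxSleep(ctx, 100*time.Millisecond) {
		attempt++
		if s, err := dial(attempt); err == nil {
			fmt.Println("connected:", s)
			return
		}
	}
	fmt.Println("gave up: context canceled")
}
```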

@@ -56,7 +56,7 @@ func (h *HeadTracker) ValidatedHead() (types.SignedHeader, bool) {
	return h.signedHead, h.hasSignedHead
}

-// ValidatedHead returns the latest validated head.
+// ValidatedFinality returns the latest validated finality.
func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
	h.lock.RLock()
	defer h.lock.RUnlock()
@@ -64,7 +64,7 @@ func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
	return h.finalityUpdate, h.hasFinalityUpdate
}

-// Validate validates the given signed head. If the head is successfully validated
+// ValidateHead validates the given signed head. If the head is successfully validated
// and it is better than the old validated head (higher slot or same slot and more
// signers) then ValidatedHead is updated. The boolean return flag signals if
// ValidatedHead has been changed.

@@ -212,7 +212,7 @@ func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
	})
}

-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
func (s *serverWithTimeout) unsubscribe() {
	s.lock.Lock()
	defer s.lock.Unlock()
@@ -337,7 +337,7 @@ func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
	return s.serverWithTimeout.sendRequest(request)
}

-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
func (s *serverWithLimits) unsubscribe() {
	s.lock.Lock()
	defer s.lock.Unlock()

@@ -101,7 +101,7 @@ func (s *HeadSync) newSignedHead(server request.Server, signedHead types.SignedH
	s.headTracker.ValidateHead(signedHead)
}

-// newSignedHead handles received signed head; either validates it if the chain
+// newFinalityUpdate handles received finality update; either validates it if the chain
// is properly synced or stores it for further validation.
func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
	if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
@@ -111,7 +111,7 @@ func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types
	s.headTracker.ValidateFinality(finalityUpdate)
}

-// processUnvalidatedHeads iterates the list of unvalidated heads and validates
+// processUnvalidated iterates the list of unvalidated heads and validates
// those which can be validated.
func (s *HeadSync) processUnvalidated() {
	if !s.chainInit {

@@ -19,7 +19,9 @@ package types
import (
	"crypto/sha256"
	"fmt"
+	"math"
	"os"
+	"slices"
	"sort"
	"strconv"
	"strings"
@@ -27,6 +29,7 @@ import (
	"github.com/ethereum/go-ethereum/beacon/merkle"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/log"
	"gopkg.in/yaml.v3"
)

@@ -34,6 +37,8 @@ import (
// across signing different data structures.
const syncCommitteeDomain = 7

+var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
+
// Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork.
type Fork struct {
@@ -46,6 +51,9 @@ type Fork struct {
	// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
	Version []byte

+	// index in list of known forks or MaxInt if unknown
+	knownIndex int
+
	// calculated by computeDomain, based on fork version and genesis validators root
	domain merkle.Value
}
@@ -101,7 +109,12 @@ func (f Forks) SigningRoot(header Header) (common.Hash, error) {

func (f Forks) Len() int      { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
-func (f Forks) Less(i, j int) bool { return f[i].Epoch < f[j].Epoch }
+func (f Forks) Less(i, j int) bool {
+	if f[i].Epoch != f[j].Epoch {
+		return f[i].Epoch < f[j].Epoch
+	}
+	return f[i].knownIndex < f[j].knownIndex
+}

// ChainConfig contains the beacon chain configuration.
type ChainConfig struct {
@@ -122,16 +135,22 @@ func (c *ChainConfig) ForkAtEpoch(epoch uint64) Fork {

// AddFork adds a new item to the list of forks.
func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainConfig {
+	knownIndex := slices.Index(knownForks, name)
+	if knownIndex == -1 {
+		knownIndex = math.MaxInt // assume that the unknown fork happens after the known ones
+		if epoch != math.MaxUint64 {
+			log.Warn("Unknown fork in config.yaml", "fork name", name, "known forks", knownForks)
+		}
+	}
	fork := &Fork{
		Name:       name,
		Epoch:      epoch,
		Version:    version,
+		knownIndex: knownIndex,
	}
	fork.computeDomain(c.GenesisValidatorsRoot)
	c.Forks = append(c.Forks, fork)
	sort.Sort(c.Forks)
	return c
}
@@ -181,6 +200,5 @@ func (c *ChainConfig) LoadForks(path string) error {
	for name := range versions {
		return fmt.Errorf("epoch number missing for fork %q in beacon chain config file", name)
	}
-	sort.Sort(c.Forks)
	return nil
}
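
The new secondary sort key matters when two forks activate at the same epoch (e.g. a devnet enabling several forks at genesis): `knownIndex` keeps them in protocol order, with unknown forks sorted last. A runnable sketch of that ordering, using a simplified `fork` type rather than the real `types.Fork`:

```go
package main

import (
	"fmt"
	"math"
	"slices"
	"sort"
)

var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}

type fork struct {
	name       string
	epoch      uint64
	knownIndex int
}

type forks []fork

func (f forks) Len() int      { return len(f) }
func (f forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f forks) Less(i, j int) bool {
	// Same rule as the patched Forks.Less: epoch first, then known-fork order.
	if f[i].epoch != f[j].epoch {
		return f[i].epoch < f[j].epoch
	}
	return f[i].knownIndex < f[j].knownIndex
}

func main() {
	// Three forks all activating at epoch 0; without the knownIndex
	// tie-break their relative order would be unspecified.
	list := forks{
		{"CAPELLA", 0, slices.Index(knownForks, "CAPELLA")},
		{"GENESIS", 0, slices.Index(knownForks, "GENESIS")},
		{"UNKNOWN", 0, math.MaxInt}, // unknown forks sort after known ones
	}
	sort.Sort(list)
	for _, f := range list {
		fmt.Println(f.name) // GENESIS, CAPELLA, UNKNOWN
	}
}
```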

@@ -36,7 +36,7 @@ type ExecutionHeader struct {
	obj headerObject
}

-// HeaderFromJSON decodes an execution header from JSON data provided by
+// ExecutionHeaderFromJSON decodes an execution header from JSON data provided by
// the beacon chain API.
func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, error) {
	var obj headerObject

@@ -5,22 +5,22 @@
# https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz

-# version:golang 1.22.1
+# version:golang 1.22.2
# https://go.dev/dl/
-79c9b91d7f109515a25fc3ecdaad125d67e6bdb54f6d4d98580f46799caea321 go1.22.1.src.tar.gz
-3bc971772f4712fec0364f4bc3de06af22a00a12daab10b6f717fdcd13156cc0 go1.22.1.darwin-amd64.tar.gz
-f6a9cec6b8a002fcc9c0ee24ec04d67f430a52abc3cfd613836986bcc00d8383 go1.22.1.darwin-arm64.tar.gz
-99f81c10d5a3f8a886faf8fa86aaa2aaf929fbed54a972ae5eec3c5e0bdb961a go1.22.1.freebsd-386.tar.gz
-51c614ddd92ee4a9913a14c39bf80508d9cfba08561f24d2f075fd00f3cfb067 go1.22.1.freebsd-amd64.tar.gz
-8484df36d3d40139eaf0fe5e647b006435d826cc12f9ae72973bf7ec265e0ae4 go1.22.1.linux-386.tar.gz
-aab8e15785c997ae20f9c88422ee35d962c4562212bb0f879d052a35c8307c7f go1.22.1.linux-amd64.tar.gz
-e56685a245b6a0c592fc4a55f0b7803af5b3f827aaa29feab1f40e491acf35b8 go1.22.1.linux-arm64.tar.gz
-8cb7a90e48c20daed39a6ac8b8a40760030ba5e93c12274c42191d868687c281 go1.22.1.linux-armv6l.tar.gz
-ac775e19d93cc1668999b77cfe8c8964abfbc658718feccfe6e0eb87663cd668 go1.22.1.linux-ppc64le.tar.gz
-7bb7dd8e10f95c9a4cc4f6bef44c816a6e7c9e03f56ac6af6efbb082b19b379f go1.22.1.linux-s390x.tar.gz
-0c5ebb7eb39b7884ec99f92b425d4c03a96a72443562aafbf6e7d15c42a3108a go1.22.1.windows-386.zip
-cf9c66a208a106402a527f5b956269ca506cfe535fc388e828d249ea88ed28ba go1.22.1.windows-amd64.zip
-85b8511b298c9f4199ecae26afafcc3d46155bac934d43f2357b9224bcaa310f go1.22.1.windows-arm64.zip
+374ea82b289ec738e968267cac59c7d5ff180f9492250254784b2044e90df5a9 go1.22.2.src.tar.gz
+33e7f63077b1c5bce4f1ecadd4d990cf229667c40bfb00686990c950911b7ab7 go1.22.2.darwin-amd64.tar.gz
+660298be38648723e783ba0398e90431de1cb288c637880cdb124f39bd977f0d go1.22.2.darwin-arm64.tar.gz
+efc7162b0cad2f918ac566a923d4701feb29dc9c0ab625157d49b1cbcbba39da go1.22.2.freebsd-386.tar.gz
+d753428296e6709527e291fd204700a587ffef2c0a472b21aebea11618245929 go1.22.2.freebsd-amd64.tar.gz
+586d9eb7fe0489ab297ad80dd06414997df487c5cf536c490ffeaa8d8f1807a7 go1.22.2.linux-386.tar.gz
+5901c52b7a78002aeff14a21f93e0f064f74ce1360fce51c6ee68cd471216a17 go1.22.2.linux-amd64.tar.gz
+36e720b2d564980c162a48c7e97da2e407dfcc4239e1e58d98082dfa2486a0c1 go1.22.2.linux-arm64.tar.gz
+9243dfafde06e1efe24d59df6701818e6786b4adfdf1191098050d6d023c5369 go1.22.2.linux-armv6l.tar.gz
+251a8886c5113be6490bdbb955ddee98763b49c9b1bf4c8364c02d3b482dab00 go1.22.2.linux-ppc64le.tar.gz
+2b39019481c28c560d65e9811a478ae10e3ef765e0f59af362031d386a71bfef go1.22.2.linux-s390x.tar.gz
+651753c06df037020ef4d162c5b273452e9ba976ed17ae39e66ef7ee89d8147e go1.22.2.windows-386.zip
+8e581cf330f49d3266e936521a2d8263679ef7e2fc2cbbceb85659122d883596 go1.22.2.windows-amd64.zip
+ddfca5beb9a0c62254266c3090c2555d899bf3e7aa26243e7de3621108f06875 go1.22.2.windows-arm64.zip

# version:golangci 1.55.2
# https://github.com/golangci/golangci-lint/releases/

@@ -46,13 +46,12 @@ import (
	"path/filepath"
	"regexp"
	"runtime"
+	"slices"
	"strconv"
	"strings"
	"sync"
	"text/template"
	"time"
-
-	"golang.org/x/exp/slices"
)

var (

@@ -9,14 +9,14 @@ It enables usecases like the following:

The two main features that are required for this to work well are;

-1. Rule Implementation: how to create, manage and interpret rules in a flexible but secure manner
-2. Credential managements and credentials; how to provide auto-unlock without exposing keys unnecessarily.
+1. Rule Implementation: how to create, manage, and interpret rules in a flexible but secure manner
+2. Credential management and credentials; how to provide auto-unlock without exposing keys unnecessarily.

The section below deals with both of them

## Rule Implementation

-A ruleset file is implemented as a `js` file. Under the hood, the ruleset-engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods
+A ruleset file is implemented as a `js` file. Under the hood, the ruleset engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods
defined in the UI protocol. Example:

```js
@@ -27,7 +27,7 @@ function asBig(str) {
	return new BigNumber(str)
}

-// Approve transactions to a certain contract if value is below a certain limit
+// Approve transactions to a certain contract if the value is below a certain limit
function ApproveTx(req) {
	var limit = big.Newint("0xb1a2bc2ec50000")
	var value = asBig(req.transaction.value);
@@ -70,7 +70,7 @@ The Otto vm has a few [caveats](https://github.com/robertkrimen/otto):
Additionally, a few more have been added

* The rule execution cannot load external javascript files.
-* The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This one is fairly old, and is not aligned with the documentation at the github repository.
+* The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This one is fairly old, and is not aligned with the documentation at the GitHub repository.
* Each invocation is made in a fresh virtual machine. This means that you cannot store data in global variables between invocations. This is a deliberate choice -- if you want to store data, use the disk-backed `storage`, since rules should not rely on ephemeral data.
* Javascript API parameters are _always_ an object. This is also a design choice, to ensure that parameters are accessed by _key_ and not by order. This is to prevent mistakes due to missing parameters or parameter changes.
* The JS engine has access to `storage` and `console`.
@@ -88,8 +88,8 @@ Some security precautions can be made, such as:

##### Security of implementation

-The drawbacks of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement, since it's already
-implemented for `geth`. There are no known security vulnerabilities in, nor have we had any security-problems with it so far.
+The drawback of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement since it's already
+implemented for `geth`. There are no known security vulnerabilities in it, nor have we had any security problems with it so far.

The javascript engine would be an added attack surface; but if the validation of `rulesets` is made good (with hash-based attestation), the actual javascript cannot be considered
an attack surface -- if an attacker can control the ruleset, a much simpler attack would be to implement an "always-approve" rule instead of exploiting the js vm. The only benefit
@@ -105,7 +105,7 @@ It's unclear whether any other DSL could be more secure; since there's always th

## Credential management

-The ability to auto-approve transaction means that the signer needs to have necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass).
+The ability to auto-approve transactions means that the signer needs to have the necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass).

### Example implementation

@@ -127,8 +127,8 @@ The `vault.dat` would be an encrypted container storing the following informatio

### Security considerations

-This would leave it up to the user to ensure that the `path/to/masterseed` is handled in a secure way. It's difficult to get around this, although one could
-imagine leveraging OS-level keychains where supported. The setup is however in general similar to how ssh-keys are stored in `.ssh/`.
+This would leave it up to the user to ensure that the `path/to/masterseed` is handled securely. It's difficult to get around this, although one could
+imagine leveraging OS-level keychains where supported. The setup is however, in general, similar to how ssh-keys are stored in `.ssh/`.

# Implementation status

@@ -149,7 +149,7 @@ function big(str) {
// Time window: 1 week
var window = 1000* 3600*24*7;

-// Limit : 1 ether
+// Limit: 1 ether
var limit = new BigNumber("1e18");

function isLimitOk(transaction) {
@@ -163,7 +163,7 @@ function isLimitOk(transaction) {
	if (stored != "") {
		txs = JSON.parse(stored)
	}

-	// First, remove all that have passed out of the time-window
+	// First, remove all that has passed out of the time window
	var newtxs = txs.filter(function(tx){return tx.tstamp > windowstart});
	console.log(txs, newtxs.length);
@@ -174,7 +174,7 @@ function isLimitOk(transaction) {
	console.log("ApproveTx > Sum so far", sum);
	console.log("ApproveTx > Requested", value.toNumber());

-	// Would we exceed weekly limit ?
+	// Would we exceed the weekly limit ?
	return sum.plus(value).lt(limit)
}

@@ -20,6 +20,7 @@ import (
	"context"
	"errors"
	"fmt"
+	"slices"
	"strconv"
	"strings"
	"time"
@@ -32,7 +33,6 @@ import (
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/dnsdisc"
	"github.com/urfave/cli/v2"
-	"golang.org/x/exp/slices"
)

const (

@@ -27,6 +27,7 @@ import (
	"math/big"
	"os"
	"path/filepath"
+	"slices"
	"sort"
	"strings"
@@ -40,7 +41,6 @@ import (
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
-	"golang.org/x/exp/slices"
)

// Chain is a lightweight blockchain-like store which can read a hivechain

@@ -102,7 +102,7 @@ func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
		}
	}

-	return fmt.Errorf("timed out waiting for txs")
+	return errors.New("timed out waiting for txs")
}

func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error {

@@ -19,6 +19,7 @@ package v5test
import (
	"bytes"
	"net"
+	"slices"
	"sync"
	"time"
@@ -266,7 +267,7 @@ func (s *Suite) TestFindnodeResults(t *utesting.T) {
		n := bn.conn.localNode.Node()
		expect[n.ID()] = n
		d := uint(enode.LogDist(n.ID(), s.Dest.ID()))
-		if !containsUint(dists, d) {
+		if !slices.Contains(dists, d) {
			dists = append(dists, d)
		}
	}

@@ -252,12 +252,3 @@ func checkRecords(records []*enr.Record) ([]*enode.Node, error) {
	}
	return nodes, nil
}
-
-func containsUint(ints []uint, x uint) bool {
-	for i := range ints {
-		if ints[i] == x {
-			return true
-		}
-	}
-	return false
-}

@@ -21,11 +21,11 @@ import (
	"encoding/json"
	"fmt"
	"os"
+	"slices"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/p2p/enode"
-	"golang.org/x/exp/slices"
)

const jsonIndent = " "

@@ -50,4 +50,4 @@ contains the password.

## JSON

-In case you need to output the result in a JSON format, you shall by using the `--json` flag.
+In case you need to output the result in a JSON format, you shall use the `--json` flag.

@@ -50,6 +50,10 @@ var (
		Name:  "trace.returndata",
		Usage: "Enable return data output in traces",
	}
+	TraceEnableCallFramesFlag = &cli.BoolFlag{
+		Name:  "trace.callframes",
+		Usage: "Enable call frames output in traces",
+	}
	OutputBasedir = &cli.StringFlag{
		Name:  "output.basedir",
		Usage: "Specifies where output files are placed. Will be created if it does not exist.",

@@ -29,6 +29,7 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/core/tracing"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"
@@ -101,9 +102,14 @@ func Transition(ctx *cli.Context) error {
		if err != nil {
			return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
		}
-		logger := logger.NewJSONLogger(logConfig, traceFile)
+		var l *tracing.Hooks
+		if ctx.Bool(TraceEnableCallFramesFlag.Name) {
+			l = logger.NewJSONLoggerWithCallFrames(logConfig, traceFile)
+		} else {
+			l = logger.NewJSONLogger(logConfig, traceFile)
+		}
		tracer := &tracers.Tracer{
-			Hooks: logger,
+			Hooks: l,
			// jsonLogger streams out result to file.
			GetResult: func() (json.RawMessage, error) { return nil, nil },
			Stop:      func(err error) {},

@@ -152,6 +152,7 @@ var stateTransitionCommand = &cli.Command{
		t8ntool.TraceEnableMemoryFlag,
		t8ntool.TraceDisableStackFlag,
		t8ntool.TraceEnableReturnDataFlag,
+		t8ntool.TraceEnableCallFramesFlag,
		t8ntool.OutputBasedir,
		t8ntool.OutputAllocFlag,
		t8ntool.OutputResultFlag,

@@ -272,8 +272,17 @@ func runCmd(ctx *cli.Context) error {
	output, leftOverGas, stats, err := timedExec(bench, execFunc)

	if ctx.Bool(DumpFlag.Name) {
-		statedb.Commit(genesisConfig.Number, true)
-		fmt.Println(string(statedb.Dump(nil)))
+		root, err := statedb.Commit(genesisConfig.Number, true)
+		if err != nil {
+			fmt.Printf("Failed to commit changes %v\n", err)
+			return err
+		}
+		dumpdb, err := state.New(root, sdb, nil)
+		if err != nil {
+			fmt.Printf("Failed to open statedb %v\n", err)
+			return err
+		}
+		fmt.Println(string(dumpdb.Dump(nil)))
	}

	if ctx.Bool(DebugFlag.Name) {

@@ -375,6 +375,14 @@ func TestT8nTracing(t *testing.T) {
			}`},
			expectedTraces: []string{"trace-0-0x88f5fbd1524731a81e49f637aa847543268a5aaf2a6b32a69d2c6d978c45dcfb.json"},
		},
+		{
+			base: "./testdata/32",
+			input: t8nInput{
+				"alloc.json", "txs.json", "env.json", "Merge", "",
+			},
+			extraArgs:      []string{"--trace", "--trace.callframes"},
+			expectedTraces: []string{"trace-0-0x47806361c0fa084be3caa18afe8c48156747c01dbdfc1ee11b5aecdbe4fcf23e.jsonl"},
+		},
	} {
		args := []string{"t8n"}
		args = append(args, tc.input.get(tc.base)...)

@@ -0,0 +1 @@ (new file)
This test does some EVM execution, and can be used to test callframes emitted by the tracer when they are enabled.

@@ -0,0 +1,30 @@ (new file)
{
"0x8a0a19589531694250d570040a0c4b74576919b8": {
"nonce": "0x00",
"balance": "0x0de0b6b3a7640000",
"code": "0x600060006000600060007310000000000000000000000000000000000000015af1600155600060006000600060007310000000000000000000000000000000000000025af16002553d600060003e600051600355",
"storage": {
"0x01": "0x0100",
"0x02": "0x0100",
"0x03": "0x0100"
}
},
"0x1000000000000000000000000000000000000001": {
"nonce": "0x00",
"balance": "0x29a2241af62c0000",
"code": "0x6103e8ff",
"storage": {}
},
"0x1000000000000000000000000000000000000002": {
"nonce": "0x00",
"balance": "0x4563918244f40000",
"code": "0x600060006000600060647310000000000000000000000000000000000000015af1600f0160005260206000fd",
"storage": {}
},
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
"nonce": "0x00",
"balance": "0x6124fee993bc0000",
"code": "0x",
"storage": {}
}
}

@@ -0,0 +1,12 @@ (new file)
{
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
"currentGasLimit": "71794957647893862",
"currentNumber": "1",
"currentTimestamp": "1000",
"currentRandom": "0",
"currentDifficulty": "0",
"blockHashes": {},
"ommers": [],
"currentBaseFee": "7",
"parentUncleHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}

@@ -0,0 +1,61 @@ (new file)
{"from":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b","to":"0x8a0a19589531694250d570040a0c4b74576919b8","gas":"0x74f18","value":"0x0","type":"CALL"}
{"pc":0,"op":96,"gas":"0x74f18","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":2,"op":96,"gas":"0x74f15","gasCost":"0x3","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":4,"op":96,"gas":"0x74f12","gasCost":"0x3","memSize":0,"stack":["0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":6,"op":96,"gas":"0x74f0f","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":8,"op":96,"gas":"0x74f0c","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":10,"op":115,"gas":"0x74f09","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH20"}
{"pc":31,"op":90,"gas":"0x74f06","gasCost":"0x2","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0","0x1000000000000000000000000000000000000001"],"depth":1,"refund":0,"opName":"GAS"}
{"pc":32,"op":241,"gas":"0x74f04","gasCost":"0x731f1","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0","0x1000000000000000000000000000000000000001","0x74f04"],"depth":1,"refund":0,"opName":"CALL"}
{"from":"0x8a0a19589531694250d570040a0c4b74576919b8","to":"0x1000000000000000000000000000000000000001","gas":"0x727c9","value":"0x0","type":"CALL"}
{"pc":0,"op":97,"gas":"0x727c9","gasCost":"0x3","memSize":0,"stack":[],"depth":2,"refund":0,"opName":"PUSH2"}
{"pc":3,"op":255,"gas":"0x727c6","gasCost":"0x7f58","memSize":0,"stack":["0x3e8"],"depth":2,"refund":0,"opName":"SELFDESTRUCT"}
{"from":"0x1000000000000000000000000000000000000001","to":"0x00000000000000000000000000000000000003e8","gas":"0x0","value":"0x29a2241af62c0000","type":"SELFDESTRUCT"}
{"output":"","gasUsed":"0x0"}
{"output":"","gasUsed":"0x7f5b"}
{"pc":33,"op":96,"gas":"0x6c581","gasCost":"0x3","memSize":0,"stack":["0x1"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":35,"op":85,"gas":"0x6c57e","gasCost":"0x1388","memSize":0,"stack":["0x1","0x1"],"depth":1,"refund":0,"opName":"SSTORE"}
{"pc":36,"op":96,"gas":"0x6b1f6","gasCost":"0x3","memSize":0,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":38,"op":96,"gas":"0x6b1f3","gasCost":"0x3","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":40,"op":96,"gas":"0x6b1f0","gasCost":"0x3","memSize":0,"stack":["0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":42,"op":96,"gas":"0x6b1ed","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":44,"op":96,"gas":"0x6b1ea","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":46,"op":115,"gas":"0x6b1e7","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0"],"depth":1,"refund":0,"opName":"PUSH20"}
{"pc":67,"op":90,"gas":"0x6b1e4","gasCost":"0x2","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0","0x1000000000000000000000000000000000000002"],"depth":1,"refund":0,"opName":"GAS"}
{"pc":68,"op":241,"gas":"0x6b1e2","gasCost":"0x69744","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x0","0x1000000000000000000000000000000000000002","0x6b1e2"],"depth":1,"refund":0,"opName":"CALL"}
{"from":"0x8a0a19589531694250d570040a0c4b74576919b8","to":"0x1000000000000000000000000000000000000002","gas":"0x68d1c","value":"0x0","type":"CALL"}
{"pc":0,"op":96,"gas":"0x68d1c","gasCost":"0x3","memSize":0,"stack":[],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":2,"op":96,"gas":"0x68d19","gasCost":"0x3","memSize":0,"stack":["0x0"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":4,"op":96,"gas":"0x68d16","gasCost":"0x3","memSize":0,"stack":["0x0","0x0"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":6,"op":96,"gas":"0x68d13","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":8,"op":96,"gas":"0x68d10","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":10,"op":115,"gas":"0x68d0d","gasCost":"0x3","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x64"],"depth":2,"refund":0,"opName":"PUSH20"}
{"pc":31,"op":90,"gas":"0x68d0a","gasCost":"0x2","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x64","0x1000000000000000000000000000000000000001"],"depth":2,"refund":0,"opName":"GAS"}
{"pc":32,"op":241,"gas":"0x68d08","gasCost":"0x67363","memSize":0,"stack":["0x0","0x0","0x0","0x0","0x64","0x1000000000000000000000000000000000000001","0x68d08"],"depth":2,"refund":0,"opName":"CALL"}
{"from":"0x1000000000000000000000000000000000000002","to":"0x1000000000000000000000000000000000000001","gas":"0x658d3","value":"0x64","type":"CALL"}
{"pc":0,"op":97,"gas":"0x658d3","gasCost":"0x3","memSize":0,"stack":[],"depth":3,"refund":0,"opName":"PUSH2"}
{"pc":3,"op":255,"gas":"0x658d0","gasCost":"0x1388","memSize":0,"stack":["0x3e8"],"depth":3,"refund":0,"opName":"SELFDESTRUCT"}
{"from":"0x1000000000000000000000000000000000000001","to":"0x00000000000000000000000000000000000003e8","gas":"0x0","value":"0x64","type":"SELFDESTRUCT"}
{"output":"","gasUsed":"0x0"}
{"output":"","gasUsed":"0x138b"}
{"pc":33,"op":96,"gas":"0x65eed","gasCost":"0x3","memSize":0,"stack":["0x1"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":35,"op":1,"gas":"0x65eea","gasCost":"0x3","memSize":0,"stack":["0x1","0xf"],"depth":2,"refund":0,"opName":"ADD"}
{"pc":36,"op":96,"gas":"0x65ee7","gasCost":"0x3","memSize":0,"stack":["0x10"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":38,"op":82,"gas":"0x65ee4","gasCost":"0x6","memSize":0,"stack":["0x10","0x0"],"depth":2,"refund":0,"opName":"MSTORE"}
{"pc":39,"op":96,"gas":"0x65ede","gasCost":"0x3","memSize":32,"stack":[],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":41,"op":96,"gas":"0x65edb","gasCost":"0x3","memSize":32,"stack":["0x20"],"depth":2,"refund":0,"opName":"PUSH1"}
{"pc":43,"op":253,"gas":"0x65ed8","gasCost":"0x0","memSize":32,"stack":["0x20","0x0"],"depth":2,"refund":0,"opName":"REVERT"}
{"pc":43,"op":253,"gas":"0x65ed8","gasCost":"0x0","memSize":32,"stack":[],"depth":2,"refund":0,"opName":"REVERT","error":"execution reverted"}
{"output":"0000000000000000000000000000000000000000000000000000000000000010","gasUsed":"0x2e44","error":"execution reverted"}
{"pc":69,"op":96,"gas":"0x67976","gasCost":"0x3","memSize":0,"stack":["0x0"],"depth":1,"refund":0,"opName":"PUSH1"}
{"pc":71,"op":85,"gas":"0x67973","gasCost":"0x1388","memSize":0,"stack":["0x0","0x2"],"depth":1,"refund":4800,"opName":"SSTORE"}
{"pc":72,"op":61,"gas":"0x665eb","gasCost":"0x2","memSize":0,"stack":[],"depth":1,"refund":4800,"opName":"RETURNDATASIZE"}
{"pc":73,"op":96,"gas":"0x665e9","gasCost":"0x3","memSize":0,"stack":["0x20"],"depth":1,"refund":4800,"opName":"PUSH1"}
{"pc":75,"op":96,"gas":"0x665e6","gasCost":"0x3","memSize":0,"stack":["0x20","0x0"],"depth":1,"refund":4800,"opName":"PUSH1"}
{"pc":77,"op":62,"gas":"0x665e3","gasCost":"0x9","memSize":0,"stack":["0x20","0x0","0x0"],"depth":1,"refund":4800,"opName":"RETURNDATACOPY"}
{"pc":78,"op":96,"gas":"0x665da","gasCost":"0x3","memSize":32,"stack":[],"depth":1,"refund":4800,"opName":"PUSH1"}
{"pc":80,"op":81,"gas":"0x665d7","gasCost":"0x3","memSize":32,"stack":["0x0"],"depth":1,"refund":4800,"opName":"MLOAD"}
{"pc":81,"op":96,"gas":"0x665d4","gasCost":"0x3","memSize":32,"stack":["0x10"],"depth":1,"refund":4800,"opName":"PUSH1"}
{"pc":83,"op":85,"gas":"0x665d1","gasCost":"0x1388","memSize":32,"stack":["0x10","0x3"],"depth":1,"refund":4800,"opName":"SSTORE"}
{"pc":84,"op":0,"gas":"0x65249","gasCost":"0x0","memSize":32,"stack":[],"depth":1,"refund":4800,"opName":"STOP"}
{"output":"","gasUsed":"0xfccf"}

@@ -0,0 +1,17 @@ (new file)
[
{
"type": "0x0",
"chainId": "0x0",
"nonce": "0x0",
"gasPrice": "0xa",
"gas": "0x7a120",
"to": "0x8a0a19589531694250d570040a0c4b74576919b8",
"value": "0x0",
"input": "0x",
"v": "0x1c",
"r": "0x9a207ad45b7fc2aa5f8e72a30487f2b0bc489778e6d022f19036efdf2a922a17",
"s": "0x640d4da05078b5a4aa561f1b4d58176ea828bfa0f88d27d14459c1d789e1a1eb",
"sender": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
}
]

@@ -48,7 +48,7 @@ func TestAttachWithHeaders(t *testing.T) {
	// This is fixed in a follow-up PR.
}

-// TestAttachWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
+// TestRemoteDbWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
// that custom headers are forwarded to the target.
func TestRemoteDbWithHeaders(t *testing.T) {
	t.Parallel()

@@ -97,7 +97,7 @@ func testExport(t *testing.T, f string) {
	}
}

-// testDeletion tests if the deletion markers can be exported/imported correctly
+// TestDeletionExport tests if the deletion markers can be exported/imported correctly
func TestDeletionExport(t *testing.T) {
	f := fmt.Sprintf("%v/tempdump", os.TempDir())
	defer func() {

@ -115,9 +115,7 @@ func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) {
// Purge empties the cache. // Purge empties the cache.
func (c *BasicLRU[K, V]) Purge() { func (c *BasicLRU[K, V]) Purge() {
c.list.init() c.list.init()
for k := range c.items { clear(c.items)
delete(c.items, k)
}
} }
// Remove drops an item from the cache. Returns true if the key was present in cache. // Remove drops an item from the cache. Returns true if the key was present in cache.
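The hunk above swaps the manual delete loop for the `clear` builtin. A minimal sketch of the behavior, assuming Go 1.21 or newer (where `clear` on a map removes every entry in place):

```go
package main

import "fmt"

func main() {
	cache := map[string]int{"a": 1, "b": 2}
	// clear (a Go 1.21 builtin) removes every entry in place, replacing
	// the for-range delete loop dropped in this hunk.
	clear(cache)
	fmt.Println(len(cache)) // 0
}
```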
@ -174,7 +172,7 @@ func (l *list[T]) init() {
l.root.prev = &l.root l.root.prev = &l.root
} }
// push adds an element to the front of the list. // pushElem adds an element to the front of the list.
func (l *list[T]) pushElem(e *listElem[T]) { func (l *list[T]) pushElem(e *listElem[T]) {
e.prev = &l.root e.prev = &l.root
e.next = l.root.next e.next = l.root.next

@ -17,11 +17,11 @@
package prque package prque
import ( import (
"cmp"
"container/heap" "container/heap"
"time" "time"
"github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/common/mclock"
"golang.org/x/exp/constraints"
) )
// LazyQueue is a priority queue data structure where priorities can change over // LazyQueue is a priority queue data structure where priorities can change over
@ -33,7 +33,7 @@ import (
// //
// If the upper estimate is exceeded then Update should be called for that item. // If the upper estimate is exceeded then Update should be called for that item.
// A global Refresh function should also be called periodically. // A global Refresh function should also be called periodically.
type LazyQueue[P constraints.Ordered, V any] struct { type LazyQueue[P cmp.Ordered, V any] struct {
clock mclock.Clock clock mclock.Clock
// Items are stored in one of two internal queues ordered by estimated max // Items are stored in one of two internal queues ordered by estimated max
// priority until the next and the next-after-next refresh. Update and Refresh // priority until the next and the next-after-next refresh. Update and Refresh
@ -50,12 +50,12 @@ type LazyQueue[P constraints.Ordered, V any] struct {
} }
type ( type (
PriorityCallback[P constraints.Ordered, V any] func(data V) P // actual priority callback PriorityCallback[P cmp.Ordered, V any] func(data V) P // actual priority callback
MaxPriorityCallback[P constraints.Ordered, V any] func(data V, until mclock.AbsTime) P // estimated maximum priority callback MaxPriorityCallback[P cmp.Ordered, V any] func(data V, until mclock.AbsTime) P // estimated maximum priority callback
) )
// NewLazyQueue creates a new lazy queue // NewLazyQueue creates a new lazy queue
func NewLazyQueue[P constraints.Ordered, V any](setIndex SetIndexCallback[V], priority PriorityCallback[P, V], maxPriority MaxPriorityCallback[P, V], clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue[P, V] { func NewLazyQueue[P cmp.Ordered, V any](setIndex SetIndexCallback[V], priority PriorityCallback[P, V], maxPriority MaxPriorityCallback[P, V], clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue[P, V] {
q := &LazyQueue[P, V]{ q := &LazyQueue[P, V]{
popQueue: newSstack[P, V](nil), popQueue: newSstack[P, V](nil),
setIndex: setIndex, setIndex: setIndex,
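This and the following prque hunks migrate the generic constraint from `golang.org/x/exp/constraints.Ordered` to the standard library's `cmp.Ordered`. A minimal sketch of the constraint in use, assuming Go 1.21 or newer:

```go
package main

import (
	"cmp"
	"fmt"
)

// maxOf returns the larger of two values of any ordered type.
// cmp.Ordered (standard library since Go 1.21) replaces
// golang.org/x/exp/constraints.Ordered used previously.
func maxOf[P cmp.Ordered](a, b P) P {
	if a > b {
		return a
	}
	return b
}

func main() {
	fmt.Println(maxOf(3, 7))         // 7
	fmt.Println(maxOf("abc", "abd")) // abd
}
```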

@ -18,22 +18,21 @@
package prque package prque
import ( import (
"cmp"
"container/heap" "container/heap"
"golang.org/x/exp/constraints"
) )
// Priority queue data structure. // Prque is a priority queue data structure.
type Prque[P constraints.Ordered, V any] struct { type Prque[P cmp.Ordered, V any] struct {
cont *sstack[P, V] cont *sstack[P, V]
} }
// New creates a new priority queue. // New creates a new priority queue.
func New[P constraints.Ordered, V any](setIndex SetIndexCallback[V]) *Prque[P, V] { func New[P cmp.Ordered, V any](setIndex SetIndexCallback[V]) *Prque[P, V] {
return &Prque[P, V]{newSstack[P, V](setIndex)} return &Prque[P, V]{newSstack[P, V](setIndex)}
} }
// Pushes a value with a given priority into the queue, expanding if necessary. // Push a value with a given priority into the queue, expanding if necessary.
func (p *Prque[P, V]) Push(data V, priority P) { func (p *Prque[P, V]) Push(data V, priority P) {
heap.Push(p.cont, &item[P, V]{data, priority}) heap.Push(p.cont, &item[P, V]{data, priority})
} }
@ -44,14 +43,14 @@ func (p *Prque[P, V]) Peek() (V, P) {
return item.value, item.priority return item.value, item.priority
} }
// Pops the value with the greatest priority off the stack and returns it. // Pop the value with the greatest priority off the stack and return it.
// Currently no shrinking is done. // Currently no shrinking is done.
func (p *Prque[P, V]) Pop() (V, P) { func (p *Prque[P, V]) Pop() (V, P) {
item := heap.Pop(p.cont).(*item[P, V]) item := heap.Pop(p.cont).(*item[P, V])
return item.value, item.priority return item.value, item.priority
} }
// Pops only the item from the queue, dropping the associated priority value. // PopItem pops only the item from the queue, dropping the associated priority value.
func (p *Prque[P, V]) PopItem() V { func (p *Prque[P, V]) PopItem() V {
return heap.Pop(p.cont).(*item[P, V]).value return heap.Pop(p.cont).(*item[P, V]).value
} }
@ -61,17 +60,17 @@ func (p *Prque[P, V]) Remove(i int) V {
return heap.Remove(p.cont, i).(*item[P, V]).value return heap.Remove(p.cont, i).(*item[P, V]).value
} }
// Checks whether the priority queue is empty. // Empty checks whether the priority queue is empty.
func (p *Prque[P, V]) Empty() bool { func (p *Prque[P, V]) Empty() bool {
return p.cont.Len() == 0 return p.cont.Len() == 0
} }
// Returns the number of element in the priority queue. // Size returns the number of elements in the priority queue.
func (p *Prque[P, V]) Size() int { func (p *Prque[P, V]) Size() int {
return p.cont.Len() return p.cont.Len()
} }
// Clears the contents of the priority queue. // Reset clears the contents of the priority queue.
func (p *Prque[P, V]) Reset() { func (p *Prque[P, V]) Reset() {
*p = *New[P, V](p.cont.setIndex) *p = *New[P, V](p.cont.setIndex)
} }
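For reference, a small usage sketch of the `Prque` API touched above (`New`, `Push`, `Pop`, `Empty`); the priorities and values here are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/prque"
)

func main() {
	// nil SetIndexCallback: we never remove items by index.
	q := prque.New[int, string](nil)
	q.Push("low", 1)
	q.Push("high", 9)
	for !q.Empty() {
		v, p := q.Pop() // highest priority comes off first
		fmt.Println(v, p)
	}
}
```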

@ -10,13 +10,13 @@
package prque package prque
import "golang.org/x/exp/constraints" import "cmp"
// The size of a block of data // The size of a block of data
const blockSize = 4096 const blockSize = 4096
// A prioritized item in the sorted stack. // A prioritized item in the sorted stack.
type item[P constraints.Ordered, V any] struct { type item[P cmp.Ordered, V any] struct {
value V value V
priority P priority P
} }
@ -29,7 +29,7 @@ type SetIndexCallback[V any] func(data V, index int)
// Internal sortable stack data structure. Implements the Push and Pop ops for // Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the // the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps. // sortability requirements of the heaps.
type sstack[P constraints.Ordered, V any] struct { type sstack[P cmp.Ordered, V any] struct {
setIndex SetIndexCallback[V] setIndex SetIndexCallback[V]
size int size int
capacity int capacity int
@ -40,7 +40,7 @@ type sstack[P constraints.Ordered, V any] struct {
} }
// Creates a new, empty stack. // Creates a new, empty stack.
func newSstack[P constraints.Ordered, V any](setIndex SetIndexCallback[V]) *sstack[P, V] { func newSstack[P cmp.Ordered, V any](setIndex SetIndexCallback[V]) *sstack[P, V] {
result := new(sstack[P, V]) result := new(sstack[P, V])
result.setIndex = setIndex result.setIndex = setIndex
result.active = make([]*item[P, V], blockSize) result.active = make([]*item[P, V], blockSize)
@ -49,7 +49,7 @@ func newSstack[P constraints.Ordered, V any](setIndex SetIndexCallback[V]) *ssta
return result return result
} }
// Pushes a value onto the stack, expanding it if necessary. Required by // Push a value onto the stack, expanding it if necessary. Required by
// heap.Interface. // heap.Interface.
func (s *sstack[P, V]) Push(data any) { func (s *sstack[P, V]) Push(data any) {
if s.size == s.capacity { if s.size == s.capacity {
@ -69,7 +69,7 @@ func (s *sstack[P, V]) Push(data any) {
s.size++ s.size++
} }
// Pops a value off the stack and returns it. Currently no shrinking is done. // Pop a value off the stack and return it. Currently no shrinking is done.
// Required by heap.Interface. // Required by heap.Interface.
func (s *sstack[P, V]) Pop() (res any) { func (s *sstack[P, V]) Pop() (res any) {
s.size-- s.size--
@ -85,18 +85,18 @@ func (s *sstack[P, V]) Pop() (res any) {
return return
} }
// Returns the length of the stack. Required by sort.Interface. // Len returns the length of the stack. Required by sort.Interface.
func (s *sstack[P, V]) Len() int { func (s *sstack[P, V]) Len() int {
return s.size return s.size
} }
// Compares the priority of two elements of the stack (higher is first). // Less compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface. // Required by sort.Interface.
func (s *sstack[P, V]) Less(i, j int) bool { func (s *sstack[P, V]) Less(i, j int) bool {
return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority
} }
// Swaps two elements in the stack. Required by sort.Interface. // Swap two elements in the stack. Required by sort.Interface.
func (s *sstack[P, V]) Swap(i, j int) { func (s *sstack[P, V]) Swap(i, j int) {
ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
a, b := s.blocks[jb][jo], s.blocks[ib][io] a, b := s.blocks[jb][jo], s.blocks[ib][io]
@ -107,7 +107,7 @@ func (s *sstack[P, V]) Swap(i, j int) {
s.blocks[ib][io], s.blocks[jb][jo] = a, b s.blocks[ib][io], s.blocks[jb][jo] = a, b
} }
// Resets the stack, effectively clearing its contents. // Reset the stack, effectively clearing its contents.
func (s *sstack[P, V]) Reset() { func (s *sstack[P, V]) Reset() {
*s = *newSstack[P, V](s.setIndex) *s = *newSstack[P, V](s.setIndex)
} }

@ -19,6 +19,7 @@ package clique
import ( import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"slices"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -28,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"golang.org/x/exp/slices"
) )
// Vote represents a single vote that an authorized signer made to modify the // Vote represents a single vote that an authorized signer made to modify the
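This hunk (and several later ones) replaces `golang.org/x/exp/slices` with the standard-library `slices` package. A minimal sketch, assuming Go 1.21 or newer:

```go
package main

import (
	"fmt"
	"slices" // standard library since Go 1.21, replacing golang.org/x/exp/slices
)

func main() {
	signers := []string{"c", "a", "b"}
	slices.Sort(signers)
	fmt.Println(signers, slices.Contains(signers, "b")) // [a b c] true
}
```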

@ -21,6 +21,7 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"fmt" "fmt"
"math/big" "math/big"
"slices"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -30,7 +31,6 @@ import (
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"golang.org/x/exp/slices"
) )
// testerAccountPool is a pool to maintain currently active tester accounts, // testerAccountPool is a pool to maintain currently active tester accounts,

@ -568,7 +568,7 @@ var (
u256_32 = uint256.NewInt(32) u256_32 = uint256.NewInt(32)
) )
// AccumulateRewards credits the coinbase of the given block with the mining // accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for // reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded. // included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) { func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) {

@ -127,7 +127,7 @@ func (l *lexer) ignore() {
l.start = l.pos l.start = l.pos
} }
// Accepts checks whether the given input matches the next rune // accept checks whether the given input matches the next rune
func (l *lexer) accept(valid string) bool { func (l *lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) { if strings.ContainsRune(valid, l.next()) {
return true return true

@ -132,7 +132,7 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
if rbloom != header.Bloom { if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom) return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
} }
// Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]])) // The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil)) receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if receiptSha != header.ReceiptHash { if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)

@ -147,8 +147,11 @@ type CacheConfig struct {
} }
// triedbConfig derives the config for the trie database. // triedbConfig derives the config for the trie database.
func (c *CacheConfig) triedbConfig() *triedb.Config { func (c *CacheConfig) triedbConfig(isVerkle bool) *triedb.Config {
config := &triedb.Config{Preimages: c.Preimages} config := &triedb.Config{
Preimages: c.Preimages,
IsVerkle: isVerkle,
}
if c.StateScheme == rawdb.HashScheme { if c.StateScheme == rawdb.HashScheme {
config.HashDB = &hashdb.Config{ config.HashDB = &hashdb.Config{
CleanCacheSize: c.TrieCleanLimit * 1024 * 1024, CleanCacheSize: c.TrieCleanLimit * 1024 * 1024,
@ -241,6 +244,8 @@ type BlockChain struct {
bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue]
receiptsCache *lru.Cache[common.Hash, []*types.Receipt] receiptsCache *lru.Cache[common.Hash, []*types.Receipt]
blockCache *lru.Cache[common.Hash, *types.Block] blockCache *lru.Cache[common.Hash, *types.Block]
txLookupLock sync.RWMutex
txLookupCache *lru.Cache[common.Hash, txLookup] txLookupCache *lru.Cache[common.Hash, txLookup]
wg sync.WaitGroup wg sync.WaitGroup
@ -265,7 +270,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
cacheConfig = defaultCacheConfig cacheConfig = defaultCacheConfig
} }
// Open trie database with provided config // Open trie database with provided config
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig()) triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(genesis != nil && genesis.IsVerkle()))
// Setup the genesis block, commit the provided genesis specification // Setup the genesis block, commit the provided genesis specification
// to database if the genesis block is not present yet, or load the // to database if the genesis block is not present yet, or load the
@ -436,7 +441,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
} }
if alloc == nil { if alloc == nil {
return nil, fmt.Errorf("live blockchain tracer requires genesis alloc to be set") return nil, errors.New("live blockchain tracer requires genesis alloc to be set")
} }
bc.logger.OnGenesisBlock(bc.genesisBlock, alloc) bc.logger.OnGenesisBlock(bc.genesisBlock, alloc)
@ -639,7 +644,7 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
} }
} }
// rewindPathHead implements the logic of rewindHead in the context of hash scheme. // rewindHashHead implements the logic of rewindHead in the context of hash scheme.
func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) { func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
var ( var (
limit uint64 // The oldest block that will be searched for this rewinding limit uint64 // The oldest block that will be searched for this rewinding
@ -1549,17 +1554,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
return nil return nil
} }
// WriteBlockAndSetHead writes the given block and all associated state to the database,
// and applies the block as the new chain head.
func (bc *BlockChain) WriteBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
if !bc.chainmu.TryLock() {
return NonStatTy, errChainStopped
}
defer bc.chainmu.Unlock()
return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
}
// writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead. // writeBlockAndSetHead is the internal implementation of WriteBlockAndSetHead.
// This function expects the chain mutex to be held. // This function expects the chain mutex to be held.
func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) { func (bc *BlockChain) writeBlockAndSetHead(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
@ -2298,14 +2292,14 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
// rewind the canonical chain to a lower point. // rewind the canonical chain to a lower point.
log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain)) log.Error("Impossible reorg, please file an issue", "oldnum", oldBlock.Number(), "oldhash", oldBlock.Hash(), "oldblocks", len(oldChain), "newnum", newBlock.Number(), "newhash", newBlock.Hash(), "newblocks", len(newChain))
} }
// Reset the tx lookup cache in case to clear stale txlookups. // Acquire the tx-lookup lock before mutation. This step is essential
// This is done before writing any new chain data to avoid the // as the txlookups should be changed atomically, and all subsequent
// weird scenario that canonical chain is changed while the // reads should be blocked until the mutation is complete.
// stale lookups are still cached. bc.txLookupLock.Lock()
bc.txLookupCache.Purge()
// Insert the new chain(except the head block(reverse order)), // Insert the new chain segment in incremental order, from the old
// taking care of the proper incremental order. // to the new. The new chain head (newChain[0]) is not inserted here,
// as it will be handled separately outside of this function
for i := len(newChain) - 1; i >= 1; i-- { for i := len(newChain) - 1; i >= 1; i-- {
// Insert the block in the canonical way, re-writing history // Insert the block in the canonical way, re-writing history
bc.writeHeadBlock(newChain[i]) bc.writeHeadBlock(newChain[i])
@ -2342,6 +2336,11 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
if err := indexesBatch.Write(); err != nil { if err := indexesBatch.Write(); err != nil {
log.Crit("Failed to delete useless indexes", "err", err) log.Crit("Failed to delete useless indexes", "err", err)
} }
// Reset the tx lookup cache to clear stale txlookup cache.
bc.txLookupCache.Purge()
// Release the tx-lookup lock after mutation.
bc.txLookupLock.Unlock()
// Send out events for logs from the old canon chain, and 'reborn' // Send out events for logs from the old canon chain, and 'reborn'
// logs from the new canon chain. The number of logs can be very // logs from the new canon chain. The number of logs can be very
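A minimal sketch of the locking scheme described in the new comments, with hypothetical names; the write lock spans both the chain mutation and the cache purge so readers never observe stale lookups:

```go
package main

import (
	"fmt"
	"sync"
)

// index is a hypothetical stand-in for the chain plus its tx-lookup cache.
type index struct {
	lock  sync.RWMutex
	cache map[string]int
}

func (ix *index) reorg(mutate func()) {
	ix.lock.Lock()
	defer ix.lock.Unlock()
	mutate()        // rewrite the canonical data
	clear(ix.cache) // purge stale lookups before releasing the lock
}

func (ix *index) lookup(k string) (int, bool) {
	ix.lock.RLock()
	defer ix.lock.RUnlock()
	v, ok := ix.cache[k]
	return v, ok
}

func main() {
	ix := &index{cache: map[string]int{"old": 1}}
	ix.reorg(func() { /* writeHeadBlock etc. would run here */ })
	_, ok := ix.lookup("old")
	fmt.Println(ok) // false: the stale entry is gone
}
```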

@ -266,6 +266,9 @@ func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, max
// transaction indexing is already finished. The transaction is not existent // transaction indexing is already finished. The transaction is not existent
// from the node's perspective. // from the node's perspective.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLookupEntry, *types.Transaction, error) { func (bc *BlockChain) GetTransactionLookup(hash common.Hash) (*rawdb.LegacyTxLookupEntry, *types.Transaction, error) {
bc.txLookupLock.RLock()
defer bc.txLookupLock.RUnlock()
// Short circuit if the txlookup already in the cache, retrieve otherwise // Short circuit if the txlookup already in the cache, retrieve otherwise
if item, exist := bc.txLookupCache.Get(hash); exist { if item, exist := bc.txLookupCache.Get(hash); exist {
return item.lookup, item.transaction, nil return item.lookup, item.transaction, nil

@ -22,7 +22,7 @@ package core
import ( import (
"fmt" "fmt"
"math/big" "math/big"
"path" "path/filepath"
"strings" "strings"
"testing" "testing"
"time" "time"
@ -1966,7 +1966,7 @@ func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme
// Create a temporary persistent database // Create a temporary persistent database
datadir := t.TempDir() datadir := t.TempDir()
ancient := path.Join(datadir, "ancient") ancient := filepath.Join(datadir, "ancient")
db, err := rawdb.Open(rawdb.OpenOptions{ db, err := rawdb.Open(rawdb.OpenOptions{
Directory: datadir, Directory: datadir,

@ -596,6 +596,9 @@ func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets []
// of the session, any request in-flight need to be responded to! Empty responses // of the session, any request in-flight need to be responded to! Empty responses
// are fine though in that case. // are fine though in that case.
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) { func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
waitTimer := time.NewTimer(wait)
defer waitTimer.Stop()
for { for {
// Allocate a new bloom bit index to retrieve data for, stopping when done // Allocate a new bloom bit index to retrieve data for, stopping when done
bit, ok := s.allocateRetrieval() bit, ok := s.allocateRetrieval()
@ -604,6 +607,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
} }
// Bit allocated, throttle a bit if we're below our batch limit // Bit allocated, throttle a bit if we're below our batch limit
if s.pendingSections(bit) < batch { if s.pendingSections(bit) < batch {
waitTimer.Reset(wait)
select { select {
case <-s.quit: case <-s.quit:
// Session terminating, we can't meaningfully service, abort // Session terminating, we can't meaningfully service, abort
@ -611,7 +615,7 @@ func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan
s.deliverSections(bit, []uint64{}, [][]byte{}) s.deliverSections(bit, []uint64{}, [][]byte{})
return return
case <-time.After(wait): case <-waitTimer.C:
// Throttling up, fetch whatever is available // Throttling up, fetch whatever is available
} }
} }
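The hunk above replaces `time.After` inside the loop with a single reusable timer. A minimal sketch of the pattern; `time.After` allocates a fresh timer on every iteration that lingers until it fires, while `Reset` reuses one:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	wait := 10 * time.Millisecond
	waitTimer := time.NewTimer(wait)
	defer waitTimer.Stop()

	for i := 0; i < 3; i++ {
		// Reset re-arms the same timer; the channel was drained on the
		// previous iteration, so this is safe.
		waitTimer.Reset(wait)
		<-waitTimer.C
		fmt.Println("tick", i)
	}
}
```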

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb"
"github.com/gballet/go-verkle"
"github.com/holiman/uint256" "github.com/holiman/uint256"
) )
@ -418,6 +419,112 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int,
return db, blocks, receipts return db, blocks, receipts
} }
func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, trdb *triedb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
if config == nil {
config = params.TestChainConfig
}
proofs := make([]*verkle.VerkleProof, 0, n)
keyvals := make([]verkle.StateDiff, 0, n)
cm := newChainMaker(parent, config, engine)
genblock := func(i int, parent *types.Block, triedb *triedb.Database, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, cm: cm, parent: parent, statedb: statedb, engine: engine}
b.header = cm.makeHeader(parent, statedb, b.engine)
// TODO uncomment when proof generation is merged
// Save pre state for proof generation
// preState := statedb.Copy()
// TODO uncomment when the 2935 PR is merged
// if config.IsPrague(b.header.Number, b.header.Time) {
// if !config.IsPrague(b.parent.Number(), b.parent.Time()) {
// Transition case: insert all 256 ancestors
// InsertBlockHashHistoryAtEip2935Fork(statedb, b.header.Number.Uint64()-1, b.header.ParentHash, chainreader)
// } else {
// ProcessParentBlockHash(statedb, b.header.Number.Uint64()-1, b.header.ParentHash)
// }
// }
// Execute any user modifications to the block
if gen != nil {
gen(i, b)
}
body := &types.Body{
Transactions: b.txs,
Uncles: b.uncles,
Withdrawals: b.withdrawals,
}
block, err := b.engine.FinalizeAndAssemble(cm, b.header, statedb, body, b.receipts)
if err != nil {
panic(err)
}
// Write state changes to db
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
if err = triedb.Commit(root, false); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
// TODO uncomment when proof generation is merged
// proofs = append(proofs, block.ExecutionWitness().VerkleProof)
// keyvals = append(keyvals, block.ExecutionWitness().StateDiff)
return block, b.receipts
}
for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), state.NewDatabaseWithNodeDB(db, trdb), nil)
if err != nil {
panic(err)
}
block, receipts := genblock(i, parent, trdb, statedb)
// Post-process the receipts.
// Here we assign the final block hash and other info into the receipt.
// In order for DeriveFields to work, the transaction and receipt lists need to be
// of equal length. If AddUncheckedTx or AddUncheckedReceipt are used, there will be
// extra ones, so we just trim the lists here.
receiptsCount := len(receipts)
txs := block.Transactions()
if len(receipts) > len(txs) {
receipts = receipts[:len(txs)]
} else if len(receipts) < len(txs) {
txs = txs[:len(receipts)]
}
var blobGasPrice *big.Int
if block.ExcessBlobGas() != nil {
blobGasPrice = eip4844.CalcBlobFee(*block.ExcessBlobGas())
}
if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, txs); err != nil {
panic(err)
}
// Re-expand to ensure all receipts are returned.
receipts = receipts[:receiptsCount]
// Advance the chain.
cm.add(block, receipts)
parent = block
}
return cm.chain, cm.receipts, proofs, keyvals
}
func GenerateVerkleChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
db := rawdb.NewMemoryDatabase()
cacheConfig := DefaultCacheConfigWithScheme(rawdb.PathScheme)
cacheConfig.SnapshotLimit = 0
triedb := triedb.NewDatabase(db, cacheConfig.triedbConfig(true))
defer triedb.Close()
genesisBlock, err := genesis.Commit(db, triedb)
if err != nil {
panic(err)
}
blocks, receipts, proofs, keyvals := GenerateVerkleChain(genesis.Config, genesisBlock, engine, db, triedb, n, gen)
return db, blocks, receipts, proofs, keyvals
}
func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { func (cm *chainMaker) makeHeader(parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
time := parent.Time() + 10 // block time is fixed at 10 seconds time := parent.Time() + 10 // block time is fixed at 10 seconds
header := &types.Header{ header := &types.Header{
@ -482,7 +589,7 @@ func makeBlockChain(chainConfig *params.ChainConfig, parent *types.Block, n int,
return blocks return blocks
} }
// makeBlockChain creates a deterministic chain of blocks from genesis // makeBlockChainWithGenesis creates a deterministic chain of blocks from genesis
func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Block) { func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Block) {
db, blocks, _ := GenerateChainWithGenesis(genesis, engine, n, func(i int, b *BlockGen) { db, blocks, _ := GenerateChainWithGenesis(genesis, engine, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)}) b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})

@ -59,7 +59,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.ExcessBlobGas != nil { if header.ExcessBlobGas != nil {
blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas) blobBaseFee = eip4844.CalcBlobFee(*header.ExcessBlobGas)
} }
if header.Difficulty.Cmp(common.Big0) == 0 { if header.Difficulty.Sign() == 0 {
random = &header.MixDigest random = &header.MixDigest
} }
return vm.BlockContext{ return vm.BlockContext{
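`Sign()` reports -1, 0 or +1 without comparing against a zero constant, which is what the hunk above relies on. A minimal sketch:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	d := big.NewInt(0)
	// Sign() avoids constructing and comparing against a zero big.Int:
	fmt.Println(d.Sign() == 0, d.Cmp(big.NewInt(0)) == 0) // true true
}
```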

@ -24,12 +24,12 @@ import (
"math" "math"
"math/big" "math/big"
"reflect" "reflect"
"slices"
"strings" "strings"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"golang.org/x/exp/slices"
) )
var ( var (

@ -582,7 +582,7 @@ func DeveloperGenesisBlock(gasLimit uint64, faucet *common.Address) *Genesis {
Config: &config, Config: &config,
GasLimit: gasLimit, GasLimit: gasLimit,
BaseFee: big.NewInt(params.InitialBaseFee), BaseFee: big.NewInt(params.InitialBaseFee),
Difficulty: big.NewInt(1), Difficulty: big.NewInt(0),
Alloc: map[common.Address]types.Account{ Alloc: map[common.Address]types.Account{
common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover
common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256 common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256

@ -30,12 +30,12 @@ import (
"fmt" "fmt"
"math/big" "math/big"
"os" "os"
"slices"
"strconv" "strconv"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/slices"
) )
type allocItem struct { type allocItem struct {

@ -22,6 +22,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
"slices"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/consensus/misc/eip4844"
@ -31,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/slices"
) )
// ReadCanonicalHash retrieves the hash assigned to a canonical block number. // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
@ -695,7 +695,7 @@ func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
return nil return nil
} }
// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc. // deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error { func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
logIndex := uint(0) logIndex := uint(0)
if len(txs) != len(receipts) { if len(txs) != len(receipts) {

@ -21,7 +21,6 @@ import (
"errors" "errors"
"fmt" "fmt"
"os" "os"
"path"
"path/filepath" "path/filepath"
"strings" "strings"
"time" "time"
@ -172,7 +171,7 @@ func resolveChainFreezerDir(ancient string) string {
// sub folder, if not then two possibilities: // sub folder, if not then two possibilities:
// - chain freezer is not initialized // - chain freezer is not initialized
// - chain freezer exists in legacy location (root ancient folder) // - chain freezer exists in legacy location (root ancient folder)
freezer := path.Join(ancient, ChainFreezerName) freezer := filepath.Join(ancient, ChainFreezerName)
if !common.FileExist(freezer) { if !common.FileExist(freezer) {
if !common.FileExist(ancient) { if !common.FileExist(ancient) {
// The entire ancient store is not initialized, still use the sub // The entire ancient store is not initialized, still use the sub
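This hunk (and the freezer test changes below) switches `path.Join` to `filepath.Join`. The difference only shows on Windows, where `filepath` uses the OS-specific separator; a minimal sketch:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always uses forward slashes (URL-style paths), while
	// filepath.Join uses the OS separator, so the two differ on Windows.
	fmt.Println(path.Join("ancient", "chain"))     // ancient/chain
	fmt.Println(filepath.Join("ancient", "chain")) // ancient\chain on Windows
}
```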

@ -23,7 +23,6 @@ import (
"math/big" "math/big"
"math/rand" "math/rand"
"os" "os"
"path"
"path/filepath" "path/filepath"
"sync" "sync"
"testing" "testing"
@ -398,11 +397,11 @@ func TestRenameWindows(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
f2, err := os.Create(path.Join(dir1, fname2)) f2, err := os.Create(filepath.Join(dir1, fname2))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
f3, err := os.Create(path.Join(dir2, fname2)) f3, err := os.Create(filepath.Join(dir2, fname2))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -424,15 +423,15 @@ func TestRenameWindows(t *testing.T) {
if err := f3.Close(); err != nil { if err := f3.Close(); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil { if err := os.Rename(f.Name(), filepath.Join(dir2, fname)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil { if err := os.Rename(f2.Name(), filepath.Join(dir2, fname2)); err != nil {
t.Fatal(err) t.Fatal(err)
} }
// Check file contents // Check file contents
f, err = os.Open(path.Join(dir2, fname)) f, err = os.Open(filepath.Join(dir2, fname))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -446,7 +445,7 @@ func TestRenameWindows(t *testing.T) {
t.Errorf("unexpected file contents. Got %v\n", buf) t.Errorf("unexpected file contents. Got %v\n", buf)
} }
f, err = os.Open(path.Join(dir2, fname2)) f, err = os.Open(filepath.Join(dir2, fname2))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

@ -17,6 +17,8 @@
package state package state
import ( import (
"maps"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
) )
@ -57,16 +59,10 @@ func newAccessList() *accessList {
// Copy creates an independent copy of an accessList. // Copy creates an independent copy of an accessList.
func (a *accessList) Copy() *accessList { func (a *accessList) Copy() *accessList {
cp := newAccessList() cp := newAccessList()
for k, v := range a.addresses { cp.addresses = maps.Clone(a.addresses)
cp.addresses[k] = v
}
cp.slots = make([]map[common.Hash]struct{}, len(a.slots)) cp.slots = make([]map[common.Hash]struct{}, len(a.slots))
for i, slotMap := range a.slots { for i, slotMap := range a.slots {
newSlotmap := make(map[common.Hash]struct{}, len(slotMap)) cp.slots[i] = maps.Clone(slotMap)
for k := range slotMap {
newSlotmap[k] = struct{}{}
}
cp.slots[i] = newSlotmap
} }
return cp return cp
} }
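The copy loops above are replaced with `maps.Clone`, which allocates a new map and shallow-copies all entries. A minimal sketch, assuming Go 1.21 or newer:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	// maps.Clone (Go 1.21) replaces the manual for-range copy loops
	// removed in this hunk; the copy is shallow.
	src := map[string]int{"a": 1}
	dst := maps.Clone(src)
	dst["a"] = 2
	fmt.Println(src["a"], dst["a"]) // 1 2
}
```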

@ -209,6 +209,8 @@ func (db *cachingDB) CopyTrie(t Trie) Trie {
switch t := t.(type) { switch t := t.(type) {
case *trie.StateTrie: case *trie.StateTrie:
return t.Copy() return t.Copy()
case *trie.VerkleTrie:
return t.Copy()
default: default:
panic(fmt.Errorf("unknown trie type %T", t)) panic(fmt.Errorf("unknown trie type %T", t))
} }

@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"math" "math"
"math/rand" "math/rand"
"slices"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@ -29,7 +30,6 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
bloomfilter "github.com/holiman/bloomfilter/v2" bloomfilter "github.com/holiman/bloomfilter/v2"
"golang.org/x/exp/slices"
) )
var ( var (

@ -19,10 +19,10 @@ package snapshot
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"slices"
"sort" "sort"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"golang.org/x/exp/slices"
) )
// weightedIterator is a iterator with an assigned weight. It is used to prioritise // weightedIterator is a iterator with an assigned weight. It is used to prioritise

@ -20,6 +20,7 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"maps"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -47,11 +48,7 @@ func (s Storage) String() (str string) {
} }
func (s Storage) Copy() Storage { func (s Storage) Copy() Storage {
cpy := make(Storage, len(s)) return maps.Clone(s)
for key, value := range s {
cpy[key] = value
}
return cpy
} }
// stateObject represents an Ethereum account which is being modified. // stateObject represents an Ethereum account which is being modified.
@ -298,6 +295,18 @@ func (s *stateObject) updateTrie() (Trie, error) {
} }
// Insert all the pending storage updates into the trie // Insert all the pending storage updates into the trie
usedStorage := make([][]byte, 0, len(s.pendingStorage)) usedStorage := make([][]byte, 0, len(s.pendingStorage))
// Perform trie updates before deletions. This prevents resolution of unnecessary trie nodes
// in circumstances similar to the following:
//
// Consider nodes `A` and `B` which share the same full node parent `P` and have no other siblings.
// During the execution of a block:
// - `A` is deleted,
// - `C` is created, and also shares the parent `P`.
// If the deletion is handled first, then `P` would be left with only one child, thus collapsed
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var deletions []common.Hash
for key, value := range s.pendingStorage { for key, value := range s.pendingStorage {
// Skip noop changes, persist actual changes // Skip noop changes, persist actual changes
if value == s.originStorage[key] { if value == s.originStorage[key] {
@ -307,13 +316,7 @@ func (s *stateObject) updateTrie() (Trie, error) {
s.originStorage[key] = value s.originStorage[key] = value
var encoded []byte // rlp-encoded value to be used by the snapshot var encoded []byte // rlp-encoded value to be used by the snapshot
if (value == common.Hash{}) { if (value != common.Hash{}) {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
return nil, err
}
s.db.StorageDeleted += 1
} else {
// Encoding []byte cannot fail, ok to ignore the error. // Encoding []byte cannot fail, ok to ignore the error.
trimmed := common.TrimLeftZeroes(value[:]) trimmed := common.TrimLeftZeroes(value[:])
encoded, _ = rlp.EncodeToBytes(trimmed) encoded, _ = rlp.EncodeToBytes(trimmed)
@ -322,6 +325,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
return nil, err return nil, err
} }
s.db.StorageUpdated += 1 s.db.StorageUpdated += 1
} else {
deletions = append(deletions, key)
} }
// Cache the mutated storage slots until commit // Cache the mutated storage slots until commit
if storage == nil { if storage == nil {
@ -353,6 +358,13 @@ func (s *stateObject) updateTrie() (Trie, error) {
// Cache the items for preloading // Cache the items for preloading
usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure
} }
for _, key := range deletions {
if err := tr.DeleteStorage(s.address, key[:]); err != nil {
s.db.setError(err)
return nil, err
}
s.db.StorageDeleted += 1
}
if s.db.prefetcher != nil { if s.db.prefetcher != nil {
s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage) s.db.prefetcher.used(s.addrHash, s.data.Root, usedStorage)
} }
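A minimal sketch of the two-phase write-out introduced above (and mirrored in `IntermediateRoot` below), using hypothetical types: updates are applied first and deletions deferred, so a shared parent node is never temporarily collapsed only to be re-expanded:

```go
package main

import "fmt"

// kv is a hypothetical stand-in for the storage trie.
type kv map[string]string

func flush(t kv, pending kv) {
	var deletions []string
	for k, v := range pending {
		if v == "" { // an empty value marks a deletion
			deletions = append(deletions, k) // defer it
			continue
		}
		t[k] = v // updates go first
	}
	for _, k := range deletions { // deletions run after all updates
		delete(t, k)
	}
}

func main() {
	t := kv{"a": "1", "b": "2"}
	flush(t, kv{"a": "", "c": "3"})
	fmt.Println(t) // map[b:2 c:3]
}
```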

@ -19,7 +19,9 @@ package state
import ( import (
"fmt" "fmt"
"maps"
"math/big" "math/big"
"slices"
"sort" "sort"
"time" "time"
@ -243,9 +245,7 @@ func (s *StateDB) Logs() []*types.Log {
func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) { func (s *StateDB) AddPreimage(hash common.Hash, preimage []byte) {
if _, ok := s.preimages[hash]; !ok { if _, ok := s.preimages[hash]; !ok {
s.journal.append(addPreimageChange{hash: hash}) s.journal.append(addPreimageChange{hash: hash})
pi := make([]byte, len(preimage)) s.preimages[hash] = slices.Clone(preimage)
copy(pi, preimage)
s.preimages[hash] = pi
} }
} }
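`slices.Clone` replaces the make-plus-copy idiom removed above; the clone shares no backing array with the original. A minimal sketch, assuming Go 1.21 or newer:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	preimage := []byte{1, 2, 3}
	cp := slices.Clone(preimage) // independent backing array
	cp[0] = 9
	fmt.Println(preimage[0], cp[0]) // 1 9
}
```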
@ -541,12 +541,11 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
} }
// deleteStateObject removes the given object from the state trie. // deleteStateObject removes the given object from the state trie.
func (s *StateDB) deleteStateObject(obj *stateObject) { func (s *StateDB) deleteStateObject(addr common.Address) {
// Track the amount of time wasted on deleting the account from the trie // Track the amount of time wasted on deleting the account from the trie
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now()) defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
// Delete the account from the trie // Delete the account from the trie
addr := obj.Address()
if err := s.trie.DeleteAccount(addr); err != nil { if err := s.trie.DeleteAccount(addr); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err)) s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
} }
@ -752,9 +751,8 @@ func (s *StateDB) Copy() *StateDB {
state.stateObjectsDirty[addr] = struct{}{} state.stateObjectsDirty[addr] = struct{}{}
} }
// Deep copy the destruction markers. // Deep copy the destruction markers.
for addr, value := range s.stateObjectsDestruct { state.stateObjectsDestruct = maps.Clone(s.stateObjectsDestruct)
state.stateObjectsDestruct[addr] = value
}
// Deep copy the state changes made in the scope of block // Deep copy the state changes made in the scope of block
// along with their original values. // along with their original values.
state.accounts = copySet(s.accounts) state.accounts = copySet(s.accounts)
@ -772,9 +770,7 @@ func (s *StateDB) Copy() *StateDB {
state.logs[hash] = cpy state.logs[hash] = cpy
} }
// Deep copy the preimages occurred in the scope of block // Deep copy the preimages occurred in the scope of block
for hash, preimage := range s.preimages { state.preimages = maps.Clone(s.preimages)
state.preimages[hash] = preimage
}
// Do we need to copy the access list and transient storage? // Do we need to copy the access list and transient storage?
// In practice: No. At the start of a transaction, these two lists are empty. // In practice: No. At the start of a transaction, these two lists are empty.
// In practice, we only ever copy state _between_ transactions/blocks, never // In practice, we only ever copy state _between_ transactions/blocks, never
@ -917,16 +913,30 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
} }
} }
usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) usedAddrs := make([][]byte, 0, len(s.stateObjectsPending))
// Perform updates before deletions. This prevents resolution of unnecessary trie nodes
// in circumstances similar to the following:
//
// Consider nodes `A` and `B` which share the same full node parent `P` and have no other siblings.
// During the execution of a block:
// - `A` self-destructs,
// - `C` is created, and also shares the parent `P`.
// If the self-destruct is handled first, then `P` would be left with only one child, thus collapsed
// into a shortnode. This requires `B` to be resolved from disk.
// Whereas if the created node is handled first, then the collapse is avoided, and `B` is not resolved.
var deletedAddrs []common.Address
for addr := range s.stateObjectsPending { for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; obj.deleted { if obj := s.stateObjects[addr]; !obj.deleted {
s.deleteStateObject(obj)
s.AccountDeleted += 1
} else {
s.updateStateObject(obj) s.updateStateObject(obj)
s.AccountUpdated += 1 s.AccountUpdated += 1
} else {
deletedAddrs = append(deletedAddrs, obj.address)
} }
usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
} }
for _, deletedAddr := range deletedAddrs {
s.deleteStateObject(deletedAddr)
s.AccountDeleted += 1
}
if prefetcher != nil { if prefetcher != nil {
prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs) prefetcher.used(common.Hash{}, s.originalRoot, usedAddrs)
} }
@ -1143,6 +1153,11 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) error {
return nil return nil
} }
// GetTrie returns the account trie.
func (s *StateDB) GetTrie() Trie {
return s.trie
}
// Commit writes the state to the underlying in-memory trie database. // Commit writes the state to the underlying in-memory trie database.
// Once the state is committed, tries cached in stateDB (including account // Once the state is committed, tries cached in stateDB (including account
// trie, storage tries) will no longer be functional. A new state instance // trie, storage tries) will no longer be functional. A new state instance

@ -422,3 +422,108 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
} }
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
} }
var (
code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`)
intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true)
// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness
// will not contain that copied data.
// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985
codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008070033`)
intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true)
)
func TestProcessVerkle(t *testing.T) {
var (
config = &params.ChainConfig{
ChainID: big.NewInt(1),
HomesteadBlock: big.NewInt(0),
EIP150Block: big.NewInt(0),
EIP155Block: big.NewInt(0),
EIP158Block: big.NewInt(0),
ByzantiumBlock: big.NewInt(0),
ConstantinopleBlock: big.NewInt(0),
PetersburgBlock: big.NewInt(0),
IstanbulBlock: big.NewInt(0),
MuirGlacierBlock: big.NewInt(0),
BerlinBlock: big.NewInt(0),
LondonBlock: big.NewInt(0),
Ethash: new(params.EthashConfig),
ShanghaiTime: u64(0),
VerkleTime: u64(0),
TerminalTotalDifficulty: common.Big0,
TerminalTotalDifficultyPassed: true,
// TODO uncomment when proof generation is merged
// ProofInBlocks: true,
}
signer = types.LatestSigner(config)
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain
coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7")
gspec = &Genesis{
Config: config,
Alloc: GenesisAlloc{
coinbase: GenesisAccount{
Balance: big.NewInt(1000000000000000000), // 1 ether
Nonce: 0,
},
},
}
)
// Verkle trees use the snapshot, which must be enabled before the
// data is saved into the tree+database.
// genesis := gspec.MustCommit(bcdb, triedb)
cacheConfig := DefaultCacheConfigWithScheme("path")
cacheConfig.SnapshotLimit = 0
blockchain, _ := NewBlockChain(bcdb, cacheConfig, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil)
defer blockchain.Stop()
txCost1 := params.TxGas
txCost2 := params.TxGas
contractCreationCost := intrinsicContractCreationGas + uint64(2039 /* execution costs */)
codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(293644 /* execution costs */)
blockGasUsagesExpected := []uint64{
txCost1*2 + txCost2,
txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas,
}
_, chain, _, _, _ := GenerateVerkleChainWithGenesis(gspec, beacon.New(ethash.NewFaker()), 2, func(i int, gen *BlockGen) {
gen.SetPoS()
// TODO need to check that the tx cost provided is the exact amount used (no remaining left-over)
tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey)
gen.AddTx(tx)
// Add two contract creations in block #2
if i == 1 {
tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey)
gen.AddTx(tx)
tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(0), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey)
gen.AddTx(tx)
}
})
t.Log("inserting blocks into the chain")
endnum, err := blockchain.InsertChain(chain)
if err != nil {
t.Fatalf("block %d imported with error: %v", endnum, err)
}
for i := 0; i < 2; i++ {
b := blockchain.GetBlockByNumber(uint64(i) + 1)
if b == nil {
t.Fatalf("expected block %d to be present in chain", i+1)
}
if b.Hash() != chain[i].Hash() {
t.Fatalf("block #%d not found at expected height", b.NumberU64())
}
if b.GasUsed() != blockGasUsagesExpected[i] {
t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed())
}
}
}

@ -0,0 +1,69 @@
# Changelog
All notable changes to the tracing interface will be documented in this file.
## [Unreleased]
There has been a major breaking change in the tracing interface for custom native tracers. JS and built-in tracers are not affected by this change and tracing API methods may be used as before. This overhaul has been done as part of the new live tracing feature ([#29189](https://github.com/ethereum/go-ethereum/pull/29189)). To learn more about live tracing please refer to the [docs](https://geth.ethereum.org/docs/developers/evm-tracing/live-tracing).
**The `EVMLogger` interface which the tracers implemented has been removed.** It has been replaced by a new struct, `tracing.Hooks`. `Hooks` keeps pointers to event-listening functions. Internally, the EVM uses these function pointers to emit events, and can skip an event if the tracer has opted not to implement it. In fact, this is the main reason for the change of approach. Another benefit is the ease of adding new hooks in the future, and of dynamically assigning event receivers.
The consequence of this change can be seen in the constructor of a tracer. Let's take the 4byte tracer as an example. Previously the constructor returned an instance which satisfied the interface. Now it should return a pointer to `tracers.Tracer` (which is now also a struct, as opposed to an interface) and explicitly assign the event listeners. As a side benefit, tracers no longer have to provide empty implementations of methods just to satisfy the interface:
```go
func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) {
t := &fourByteTracer{
ids: make(map[string]int),
}
return t, nil
}
```
And now:
```go
func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
t := &fourByteTracer{
ids: make(map[string]int),
}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
OnTxStart: t.onTxStart,
OnEnter: t.onEnter,
},
GetResult: t.getResult,
Stop: t.stop,
}, nil
}
```
### Event listeners
If you have sharp eyes you might have noticed the new names for `OnTxStart` and `OnEnter`, previously called `CaptureTxStart` and `CaptureEnter`. Indeed there have been various modifications to the signatures of the event listeners. All method names now follow the `On*` pattern instead of `Capture*`. However, the modifications are not limited to the names.
#### New methods
Half the motivation for the live tracing feature was to add more observability into the state of the blockchain. As such there have been a host of method additions. Please consult the [Hooks](./hooks.go) struct for the full list of methods. Custom tracers which are invoked through the API (as opposed to "live" tracers) can benefit from the following new methods; a short sketch of wiring one of them follows the list:
- `OnGasChange(old, new uint64, reason GasChangeReason)`: This hook tracks the lifetime of gas within a transaction and its subcalls. It first tracks the initial purchase of gas with ether, then the subsequent consumptions and refunds of gas, until finally the remainder is returned.
- `OnBalanceChange(addr common.Address, prev, new *big.Int, reason BalanceChangeReason)`: This hook tracks the balance changes of accounts. Where possible a reason is provided for the change (e.g. a transfer, gas purchase, withdrawal, deposit, etc.).
- `OnNonceChange(addr common.Address, prev, new uint64)`: This hook tracks the nonce changes of accounts.
- `OnCodeChange(addr common.Address, prevCodeHash common.Hash, prevCode []byte, codeHash common.Hash, code []byte)`: This hook tracks the code changes of accounts.
- `OnStorageChange(addr common.Address, slot common.Hash, prev, new common.Hash)`: This hook tracks the storage changes of accounts.
- `OnLogChange(log *types.Log)`: This hook tracks the logs emitted by the EVM.
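For illustration, here is a minimal sketch of wiring two of these hooks into a tracer, following the constructor pattern shown above; the `balanceTracer` type and its method names are hypothetical and not part of this release:
```go
type balanceTracer struct {
	changes []string // human-readable record of observed changes, purely illustrative
}

func newBalanceTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
	t := &balanceTracer{}
	return &tracers.Tracer{
		Hooks: &tracing.Hooks{
			OnBalanceChange: t.onBalanceChange,
			OnNonceChange:   t.onNonceChange,
		},
		GetResult: t.getResult,
		Stop:      t.stop,
	}, nil
}

func (t *balanceTracer) onBalanceChange(addr common.Address, prev, new *big.Int, reason tracing.BalanceChangeReason) {
	t.changes = append(t.changes, fmt.Sprintf("balance %v: %v -> %v (reason %v)", addr, prev, new, reason))
}

func (t *balanceTracer) onNonceChange(addr common.Address, prev, new uint64) {
	t.changes = append(t.changes, fmt.Sprintf("nonce %v: %d -> %d", addr, prev, new))
}

func (t *balanceTracer) getResult() (json.RawMessage, error) {
	return json.Marshal(t.changes)
}

func (t *balanceTracer) stop(err error) {}
```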
#### Removed methods
The hooks `CaptureStart` and `CaptureEnd` have been removed. These hooks signaled the top-level call frame of a transaction. The relevant info will now be emitted by `OnEnter` and `OnExit`, which are emitted for every call frame. They contain a `depth` parameter which can be used to distinguish the top-level call frame when necessary. The `create bool` parameter to `CaptureStart` can now be inferred from `typ byte` in `OnEnter`, i.e. `vm.OpCode(typ) == vm.CREATE`, as the sketch below demonstrates.
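A minimal sketch, assuming a hypothetical `myTracer` type (not part of this diff), of recovering the old `CaptureStart` semantics inside `OnEnter`:
```go
func (t *myTracer) onEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
	if depth != 0 {
		return // not the top-level frame; CaptureStart would not have fired here
	}
	// The old `create bool` parameter, now inferred from the call type.
	create := vm.OpCode(typ) == vm.CREATE
	_ = create
}
```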
#### Modified methods
- `CaptureTxStart` -> `OnTxStart(vm *VMContext, tx *types.Transaction, from common.Address)`. It now emits the full transaction object as well as `from` which should be used to get the sender address. The `*VMContext` is a replacement for the `*vm.EVM` object previously passed to `CaptureStart`.
- `CaptureTxEnd` -> `OnTxEnd(receipt *types.Receipt, err error)`. It now returns the full receipt object.
- `CaptureEnter` -> `OnEnter(depth int, typ byte, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int)`. The new `depth int` parameter indicates the call stack depth. It is 0 for the top-level call. Furthermore, `OnEnter` is now invoked in the EVM as soon as a call is started. This means some specific error cases that previously did not trigger `OnEnter`/`OnExit` will now do so, leading some transactions to have an extra call frame traced.
- `CaptureExit` -> `OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool)`. It has the new `depth` parameter, same as `OnEnter`. The new `reverted` parameter indicates whether the call frame was reverted.
- `CaptureState` -> `OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error)`. `op` is of type `byte` which can be cast to `vm.OpCode` when necessary. A `*vm.ScopeContext` is not passed anymore. It is replaced by `tracing.OpContext` which offers access to the memory, stack and current contract (see the sketch after this list).
- `CaptureFault` -> `OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error)`. Similar to above.
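As a rough sketch (again with a hypothetical `myTracer`, and using only the `StackData` accessor that `tracing.OpContext` exposes, as shown for `ScopeContext` elsewhere in this diff), an `OnOpcode` hook replacing an old `CaptureState` implementation might look like:
```go
func (t *myTracer) onOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
	// `op` arrives as a plain byte; cast it when opcode semantics are needed.
	if vm.OpCode(op) != vm.SLOAD {
		return
	}
	// Peek at the stack through the OpContext instead of *vm.ScopeContext.
	stack := scope.StackData()
	if len(stack) > 0 {
		slot := stack[len(stack)-1] // topmost element: the storage slot being read
		_ = slot
	}
}
```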
[unreleased]: https://github.com/ethereum/go-ethereum/compare/v1.13.14...master

@@ -1226,7 +1226,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
 	return errs
 }
 
-// Add inserts a new blob transaction into the pool if it passes validation (both
+// add inserts a new blob transaction into the pool if it passes validation (both
 // consensus validity and pool restrictions).
 func (p *BlobPool) add(tx *types.Transaction) (err error) {
 	// The blob pool blocks on adding a transaction. This is because blob txs are

@@ -60,7 +60,7 @@ func newLimbo(datadir string) (*limbo, error) {
 			fails = append(fails, id)
 		}
 	}
-	store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
+	store, err := billy.Open(billy.Options{Path: datadir, Repair: true}, newSlotter(), index)
 	if err != nil {
 		return nil, err
 	}

@@ -20,6 +20,7 @@ import (
 	"container/heap"
 	"math"
 	"math/big"
+	"slices"
 	"sort"
 	"sync"
 	"sync/atomic"
@@ -28,7 +29,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/holiman/uint256"
-	"golang.org/x/exp/slices"
 )
 
 // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for

@@ -197,8 +197,8 @@ type Block struct {
 	withdrawals Withdrawals
 
 	// caches
-	hash atomic.Value
-	size atomic.Value
+	hash atomic.Pointer[common.Hash]
+	size atomic.Uint64
 
 	// These fields are used by package eth to track
 	// inter-peer block relay.
@@ -406,8 +406,8 @@ func (b *Block) BlobGasUsed() *uint64 {
 // Size returns the true RLP encoded storage size of the block, either by encoding
 // and returning it, or returning a previously cached value.
 func (b *Block) Size() uint64 {
-	if size := b.size.Load(); size != nil {
-		return size.(uint64)
+	if size := b.size.Load(); size > 0 {
+		return size
 	}
 	c := writeCounter(0)
 	rlp.Encode(&c, b)
@@ -486,11 +486,11 @@ func (b *Block) WithWithdrawals(withdrawals []*Withdrawal) *Block {
 // The hash is computed on the first call and cached thereafter.
 func (b *Block) Hash() common.Hash {
 	if hash := b.hash.Load(); hash != nil {
-		return hash.(common.Hash)
+		return *hash
 	}
-	v := b.header.Hash()
-	b.hash.Store(v)
-	return v
+	h := b.header.Hash()
+	b.hash.Store(&h)
+	return h
 }
 
 type Blocks []*Block

@@ -57,9 +57,9 @@ type Transaction struct {
 	time time.Time // Time first seen locally (spam avoidance)
 
 	// caches
-	hash atomic.Value
-	size atomic.Value
-	from atomic.Value
+	hash atomic.Pointer[common.Hash]
+	size atomic.Uint64
+	from atomic.Pointer[sigCache]
 }
 
 // NewTx creates a new transaction.
@@ -446,6 +446,26 @@ func (tx *Transaction) WithoutBlobTxSidecar() *Transaction {
 	return cpy
 }
 
+// WithBlobTxSidecar returns a copy of tx with the blob sidecar added.
+func (tx *Transaction) WithBlobTxSidecar(sideCar *BlobTxSidecar) *Transaction {
+	blobtx, ok := tx.inner.(*BlobTx)
+	if !ok {
+		return tx
+	}
+	cpy := &Transaction{
+		inner: blobtx.withSidecar(sideCar),
+		time:  tx.time,
+	}
+	// Note: tx.size cache not carried over because the sidecar is included in size!
+	if h := tx.hash.Load(); h != nil {
+		cpy.hash.Store(h)
+	}
+	if f := tx.from.Load(); f != nil {
+		cpy.from.Store(f)
+	}
+	return cpy
+}
+
 // SetTime sets the decoding time of a transaction. This is used by tests to set
 // arbitrary times and by persistent transaction pools when loading old txs from
 // disk.
@@ -462,7 +482,7 @@ func (tx *Transaction) Time() time.Time {
 // Hash returns the transaction hash.
 func (tx *Transaction) Hash() common.Hash {
 	if hash := tx.hash.Load(); hash != nil {
-		return hash.(common.Hash)
+		return *hash
 	}
 
 	var h common.Hash
@@ -471,15 +491,15 @@ func (tx *Transaction) Hash() common.Hash {
 	} else {
 		h = prefixedRlpHash(tx.Type(), tx.inner)
 	}
-	tx.hash.Store(h)
+	tx.hash.Store(&h)
 	return h
 }
 
 // Size returns the true encoded storage size of the transaction, either by encoding
 // and returning it, or returning a previously cached value.
 func (tx *Transaction) Size() uint64 {
-	if size := tx.size.Load(); size != nil {
-		return size.(uint64)
+	if size := tx.size.Load(); size > 0 {
+		return size
 	}
 
 	// Cache miss, encode and cache.

@@ -128,8 +128,7 @@ func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction
 // signing method. The cache is invalidated if the cached signer does
 // not match the signer used in the current call.
 func Sender(signer Signer, tx *Transaction) (common.Address, error) {
-	if sc := tx.from.Load(); sc != nil {
-		sigCache := sc.(sigCache)
+	if sigCache := tx.from.Load(); sigCache != nil {
 		// If the signer used to derive from in a previous
 		// call is not the same as used current, invalidate
 		// the cache.
@@ -142,7 +141,7 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) {
 	if err != nil {
 		return common.Address{}, err
 	}
-	tx.from.Store(sigCache{signer: signer, from: addr})
+	tx.from.Store(&sigCache{signer: signer, from: addr})
 	return addr, nil
 }

@@ -22,6 +22,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"maps"
 	"math/big"
 	"reflect"
 	"testing"
@@ -515,10 +516,7 @@ func TestYParityJSONUnmarshalling(t *testing.T) {
 			test := test
 			t.Run(fmt.Sprintf("txType=%d: %s", txType, test.name), func(t *testing.T) {
 				// Copy the base json
-				testJson := make(map[string]interface{})
-				for k, v := range baseJson {
-					testJson[k] = v
-				}
+				testJson := maps.Clone(baseJson)
 
 				// Set v, yParity and type
 				if test.v != "" {

@@ -191,6 +191,12 @@ func (tx *BlobTx) withoutSidecar() *BlobTx {
 	return &cpy
 }
 
+func (tx *BlobTx) withSidecar(sideCar *BlobTxSidecar) *BlobTx {
+	cpy := *tx
+	cpy.Sidecar = sideCar
+	return &cpy
+}
+
 func (tx *BlobTx) encode(b *bytes.Buffer) error {
 	if tx.Sidecar == nil {
 		return rlp.Encode(b, tx)

@@ -111,15 +111,15 @@ var PrecompiledContractsCancun = map[common.Address]PrecompiledContract{
 // PrecompiledContractsBLS contains the set of pre-compiled Ethereum
 // contracts specified in EIP-2537. These are exported for testing purposes.
 var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
-	common.BytesToAddress([]byte{10}): &bls12381G1Add{},
-	common.BytesToAddress([]byte{11}): &bls12381G1Mul{},
-	common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{},
-	common.BytesToAddress([]byte{13}): &bls12381G2Add{},
-	common.BytesToAddress([]byte{14}): &bls12381G2Mul{},
-	common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{},
-	common.BytesToAddress([]byte{16}): &bls12381Pairing{},
-	common.BytesToAddress([]byte{17}): &bls12381MapG1{},
-	common.BytesToAddress([]byte{18}): &bls12381MapG2{},
+	common.BytesToAddress([]byte{11}): &bls12381G1Add{},
+	common.BytesToAddress([]byte{12}): &bls12381G1Mul{},
+	common.BytesToAddress([]byte{13}): &bls12381G1MultiExp{},
+	common.BytesToAddress([]byte{14}): &bls12381G2Add{},
+	common.BytesToAddress([]byte{15}): &bls12381G2Mul{},
+	common.BytesToAddress([]byte{16}): &bls12381G2MultiExp{},
+	common.BytesToAddress([]byte{17}): &bls12381Pairing{},
+	common.BytesToAddress([]byte{18}): &bls12381MapG1{},
+	common.BytesToAddress([]byte{19}): &bls12381MapG2{},
 }
 
 var (
@@ -182,7 +182,7 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uin
 	return output, suppliedGas, err
 }
 
-// ECRECOVER implemented as a native contract.
+// ecrecover implemented as a native contract.
 type ecrecover struct{}
 
 func (c *ecrecover) RequiredGas(input []byte) uint64 {
@@ -457,7 +457,7 @@ func runBn256Add(input []byte) ([]byte, error) {
 	return res.Marshal(), nil
 }
 
-// bn256Add implements a native elliptic curve point addition conforming to
+// bn256AddIstanbul implements a native elliptic curve point addition conforming to
 // Istanbul consensus rules.
 type bn256AddIstanbul struct{}

@@ -31,6 +31,7 @@ var (
 	ErrContractAddressCollision = errors.New("contract address collision")
 	ErrExecutionReverted        = errors.New("execution reverted")
 	ErrMaxCodeSizeExceeded      = errors.New("max code size exceeded")
+	ErrMaxInitCodeSizeExceeded  = errors.New("max initcode size exceeded")
 	ErrInvalidJump              = errors.New("invalid jump destination")
 	ErrWriteProtection          = errors.New("write protection")
 	ErrReturnDataOutOfBounds    = errors.New("return data out of bounds")

@@ -439,13 +439,19 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	if evm.chainRules.IsBerlin {
 		evm.StateDB.AddAddressToAccessList(address)
 	}
-	// Ensure there's no existing contract already at the designated address
+	// Ensure there's no existing contract already at the designated address.
+	// Account is regarded as existent if any of these three conditions is met:
+	// - the nonce is nonzero
+	// - the code is non-empty
+	// - the storage is non-empty
 	contractHash := evm.StateDB.GetCodeHash(address)
-	if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) {
+	storageRoot := evm.StateDB.GetStorageRoot(address)
+	if evm.StateDB.GetNonce(address) != 0 ||
+		(contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) || // non-empty code
+		(storageRoot != (common.Hash{}) && storageRoot != types.EmptyRootHash) { // non-empty storage
 		if evm.Config.Tracer != nil && evm.Config.Tracer.OnGasChange != nil {
 			evm.Config.Tracer.OnGasChange(gas, 0, tracing.GasChangeCallFailedExecution)
 		}
 		return nil, common.Address{}, 0, ErrContractAddressCollision
 	}
 	// Create a new account on the state

@@ -18,6 +18,7 @@ package vm
 
 import (
 	"errors"
+	"fmt"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
@@ -310,9 +311,12 @@ func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m
 		return 0, err
 	}
 	size, overflow := stack.Back(2).Uint64WithOverflow()
-	if overflow || size > params.MaxInitCodeSize {
+	if overflow {
 		return 0, ErrGasUintOverflow
 	}
+	if size > params.MaxInitCodeSize {
+		return 0, fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
+	}
 	// Since size <= params.MaxInitCodeSize, these multiplication cannot overflow
 	moreGas := params.InitCodeWordGas * ((size + 31) / 32)
 	if gas, overflow = math.SafeAdd(gas, moreGas); overflow {
@@ -326,9 +330,12 @@ func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory,
 		return 0, err
 	}
 	size, overflow := stack.Back(2).Uint64WithOverflow()
-	if overflow || size > params.MaxInitCodeSize {
+	if overflow {
 		return 0, ErrGasUintOverflow
 	}
+	if size > params.MaxInitCodeSize {
+		return 0, fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
+	}
 	// Since size <= params.MaxInitCodeSize, these multiplication cannot overflow
 	moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32)
 	if gas, overflow = math.SafeAdd(gas, moreGas); overflow {

@@ -18,6 +18,7 @@ package vm
 
 import (
 	"bytes"
+	"errors"
 	"math"
 	"math/big"
 	"sort"
@@ -98,7 +99,7 @@ func TestEIP2200(t *testing.T) {
 		vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}})
 
 		_, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(uint256.Int))
-		if err != tt.failure {
+		if !errors.Is(err, tt.failure) {
 			t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.failure)
 		}
 		if used := tt.gaspool - gas; used != tt.used {

@@ -49,6 +49,7 @@ type StateDB interface {
 	GetCommittedState(common.Address, common.Hash) common.Hash
 	GetState(common.Address, common.Hash) common.Hash
 	SetState(common.Address, common.Hash, common.Hash)
+	GetStorageRoot(addr common.Address) common.Hash
 
 	GetTransientState(addr common.Address, key common.Hash) common.Hash
 	SetTransientState(addr common.Address, key, value common.Hash)

@@ -17,6 +17,8 @@
 package vm
 
 import (
+	"fmt"
+
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/common/math"
 	"github.com/ethereum/go-ethereum/core/tracing"
@@ -50,7 +52,7 @@ func (ctx *ScopeContext) MemoryData() []byte {
 	return ctx.Memory.Data()
 }
 
-// MemoryData returns the stack data. Callers must not modify the contents
+// StackData returns the stack data. Callers must not modify the contents
 // of the returned data.
 func (ctx *ScopeContext) StackData() []uint256.Int {
 	if ctx.Stack == nil {
@@ -255,7 +257,10 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 			var dynamicCost uint64
 			dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
 			cost += dynamicCost // for tracing
-			if err != nil || !contract.UseGas(dynamicCost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
+			if err != nil {
+				return nil, fmt.Errorf("%w: %v", ErrOutOfGas, err)
+			}
+			if !contract.UseGas(dynamicCost, in.evm.Config.Tracer, tracing.GasChangeIgnored) {
 				return nil, ErrOutOfGas
 			}

@@ -167,7 +167,7 @@ type btCurve struct {
 	*btcec.KoblitzCurve
 }
 
-// Marshall converts a point given as (x, y) into a byte slice.
+// Marshal converts a point given as (x, y) into a byte slice.
 func (curve btCurve) Marshal(x, y *big.Int) []byte {
 	byteLen := (curve.Params().BitSize + 7) / 8

@@ -134,6 +134,7 @@ func createKeyPair() (string, string) {
 	defer os.Remove(tmpKey.Name())
 	defer os.Remove(tmpKey.Name() + ".pub")
 	defer os.Remove(tmpKey.Name() + ".sec")
+	defer tmpKey.Close()
 	cmd := exec.Command("signify", "-G", "-n", "-p", tmpKey.Name()+".pub", "-s", tmpKey.Name()+".sec")
 	if output, err := cmd.CombinedOutput(); err != nil {
 		panic(fmt.Sprintf("could not verify the file: %v, output: \n%s", err, output))

@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"fmt"
 	"reflect"
+	"slices"
 	"strings"
 	"testing"
 
@@ -32,7 +33,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/triedb"
 	"github.com/holiman/uint256"
-	"golang.org/x/exp/slices"
 )
 
 var dumper = spew.ConfigState{Indent: " "}

@@ -101,8 +101,8 @@ type Ethereum struct {
 	shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully
 }
 
-// New creates a new Ethereum object (including the
-// initialisation of the common Ethereum object)
+// New creates a new Ethereum object (including the initialisation of the common Ethereum object),
+// whose lifecycle will be managed by the provided node.
 func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	// Ensure configuration values are compatible and sane
 	if config.SyncMode == downloader.LightSync {
@@ -111,7 +111,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 	if !config.SyncMode.IsValid() {
 		return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode)
 	}
-	if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 {
+	if config.Miner.GasPrice == nil || config.Miner.GasPrice.Sign() <= 0 {
 		log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice)
 		config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
 	}

@@ -63,7 +63,7 @@ func (w *withdrawalQueue) gatherPending(maxCount int) []*types.Withdrawal {
 		case withdrawal := <-w.pending:
 			withdrawals = append(withdrawals, withdrawal)
 			if len(withdrawals) == maxCount {
-				break
+				return withdrawals
 			}
 		default:
 			return withdrawals

@@ -74,7 +74,7 @@ func startSimulatedBeaconEthService(t *testing.T, genesis *core.Genesis) (*node.
 
 // send enough transactions to fill multiple blocks
 func TestSimulatedBeaconSendWithdrawals(t *testing.T) {
 	var withdrawals []types.Withdrawal
-	txs := make(map[common.Hash]types.Transaction)
+	txs := make(map[common.Hash]*types.Transaction)
 
 	var (
 		// testKey is a private key to use for funding a tester account.
@@ -110,7 +110,7 @@ func TestSimulatedBeaconSendWithdrawals(t *testing.T) {
 		if err != nil {
 			t.Fatalf("error signing transaction, err=%v", err)
 		}
-		txs[tx.Hash()] = *tx
+		txs[tx.Hash()] = tx
 
 		if err := ethService.APIBackend.SendTx(context.Background(), tx); err != nil {
 			t.Fatal("SendTx failed", err)

@@ -289,6 +289,9 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
 		localHeaders = d.readHeaderRange(tail, int(count))
 		log.Warn("Retrieved beacon headers from local", "from", from, "count", count)
 	}
+	fsHeaderContCheckTimer := time.NewTimer(fsHeaderContCheck)
+	defer fsHeaderContCheckTimer.Stop()
+
 	for {
 		// Some beacon headers might have appeared since the last cycle, make
 		// sure we're always syncing to all available ones
@@ -381,8 +384,9 @@ func (d *Downloader) fetchBeaconHeaders(from uint64) error {
 		}
 		// State sync still going, wait a bit for new headers and retry
 		log.Trace("Pivot not yet committed, waiting...")
+		fsHeaderContCheckTimer.Reset(fsHeaderContCheck)
 		select {
-		case <-time.After(fsHeaderContCheck):
+		case <-fsHeaderContCheckTimer.C:
 		case <-d.cancelCh:
 			return errCanceled
 		}

@@ -1276,7 +1276,10 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
 	var (
 		mode       = d.getMode()
 		gotHeaders = false // Wait for batches of headers to process
+		timer      = time.NewTimer(time.Second)
 	)
+	defer timer.Stop()
+
 	for {
 		select {
 		case <-d.cancelCh:
@@ -1397,10 +1400,11 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode
 			if mode == FullSync || mode == SnapSync {
 				// If we've reached the allowed number of pending headers, stall a bit
 				for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
+					timer.Reset(time.Second)
 					select {
 					case <-d.cancelCh:
 						return errCanceled
-					case <-time.After(time.Second):
+					case <-timer.C:
 					}
 				}
 				// Otherwise insert the headers for content retrieval
@@ -1567,7 +1571,10 @@ func (d *Downloader) processSnapSyncContent() error {
 	var (
 		oldPivot *fetchResult   // Locked in pivot block, might change eventually
 		oldTail  []*fetchResult // Downloaded content after the pivot
+		timer    = time.NewTimer(time.Second)
 	)
+	defer timer.Stop()
+
 	for {
 		// Wait for the next batch of downloaded data to be available. If we have
 		// not yet reached the pivot point, wait blockingly as there's no need to
@@ -1650,6 +1657,7 @@ func (d *Downloader) processSnapSyncContent() error {
 			oldPivot = P
 		}
 		// Wait for completion, occasionally checking for pivot staleness
+		timer.Reset(time.Second)
 		select {
 		case <-sync.done:
 			if sync.err != nil {
@@ -1660,7 +1668,7 @@ func (d *Downloader) processSnapSyncContent() error {
 			}
 			oldPivot = nil
-		case <-time.After(time.Second):
+		case <-timer.C:
 			oldTail = afterP
 			continue
 		}

@@ -57,7 +57,7 @@ func newTester(t *testing.T) *downloadTester {
 	return newTesterWithNotification(t, nil)
 }
 
-// newTester creates a new downloader test mocker.
+// newTesterWithNotification creates a new downloader test mocker.
 func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
 	freezer := t.TempDir()
 	db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)

@@ -94,7 +94,7 @@ func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
 	}
 }
 
-// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with,
+// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
 // and sets an optional serve hook that can return headers for delivery instead
 // of the predefined chain. Useful for emulating malicious behavior that would
 // otherwise require dedicated peer types.

@@ -20,6 +20,7 @@ import (
 	"errors"
 	"math/big"
 	"math/rand"
+	"slices"
 	"testing"
 	"time"
@@ -1823,12 +1824,12 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
 				continue
 			}
 			for _, hash := range hashes {
-				if !containsHash(request.hashes, hash) {
+				if !slices.Contains(request.hashes, hash) {
 					t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
 				}
 			}
 			for _, hash := range request.hashes {
-				if !containsHash(hashes, hash) {
+				if !slices.Contains(hashes, hash) {
 					t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
 				}
 			}
@@ -1850,7 +1851,7 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
 			for hash := range fetcher.fetching {
 				var found bool
 				for _, req := range fetcher.requests {
-					if containsHash(req.hashes, hash) {
+					if slices.Contains(req.hashes, hash) {
 						found = true
 						break
 					}
@@ -1891,12 +1892,12 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
 				continue
 			}
 			for _, hash := range hashes {
-				if !containsHash(request.hashes, hash) {
+				if !slices.Contains(request.hashes, hash) {
 					t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
 				}
 			}
 			for _, hash := range request.hashes {
-				if !containsHash(hashes, hash) {
+				if !slices.Contains(hashes, hash) {
 					t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
 				}
 			}
@@ -1909,7 +1910,7 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
 			for _, ann := range announces {
 				var found bool
 				for _, hs := range step.fetching {
-					if containsHash(hs, ann.hash) {
+					if slices.Contains(hs, ann.hash) {
 						found = true
 						break
 					}
@@ -1925,7 +1926,7 @@ func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
 				}
 			}
 			for hash := range fetcher.announced {
-				if !containsHash(queued, hash) {
+				if !slices.Contains(queued, hash) {
 					t.Errorf("step %d: hash %x extra in announced", i, hash)
 				}
 			}
@@ -1984,16 +1985,6 @@ func containsHashInAnnounces(slice []announce, hash common.Hash) bool {
 	return false
 }
 
-// containsHash returns whether a hash is contained within a hash slice.
-func containsHash(slice []common.Hash, hash common.Hash) bool {
-	for _, have := range slice {
-		if have == hash {
-			return true
-		}
-	}
-	return false
-}
-
 // Tests that a transaction is forgotten after the timeout.
 func TestTransactionForgotten(t *testing.T) {
 	fetcher := NewTxFetcher(

@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"math/big"
+	"slices"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/bloombits"
@@ -347,16 +348,6 @@ func (f *Filter) pendingLogs() []*types.Log {
 	return nil
 }
 
-// includes returns true if the element is present in the list.
-func includes[T comparable](things []T, element T) bool {
-	for _, thing := range things {
-		if thing == element {
-			return true
-		}
-	}
-	return false
-}
-
 // filterLogs creates a slice of logs matching the given criteria.
 func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {
 	var check = func(log *types.Log) bool {
@@ -366,7 +357,7 @@ func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []comm
 		if toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {
 			return false
 		}
-		if len(addresses) > 0 && !includes(addresses, log.Address) {
+		if len(addresses) > 0 && !slices.Contains(addresses, log.Address) {
 			return false
 		}
 		// If the to filtered topics is greater than the amount of topics in logs, skip.
@@ -377,7 +368,7 @@ func filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []comm
 			if len(sub) == 0 {
 				continue // empty rule set == wildcard
 			}
-			if !includes(sub, log.Topics[i]) {
+			if !slices.Contains(sub, log.Topics[i]) {
 				return false
 			}
 		}

@@ -95,7 +95,7 @@ func NewFilterSystem(backend Backend, config Config) *FilterSystem {
 type logCacheElem struct {
 	logs []*types.Log
-	body atomic.Value
+	body atomic.Pointer[types.Body]
 }
 
 // cachedLogElem loads block logs from the backend and caches the result.
@@ -133,7 +133,7 @@ func (sys *FilterSystem) cachedGetBody(ctx context.Context, elem *logCacheElem, hash common.Hash, number uint64) (*types.Body, error) {
 	if body := elem.body.Load(); body != nil {
-		return body.(*types.Body), nil
+		return body, nil
 	}
 	body, err := sys.backend.GetBody(ctx, hash, rpc.BlockNumber(number))
 	if err != nil {

@@ -442,7 +442,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
 	}
 }
 
-// TestLogFilterUninstall tests invalid getLogs requests
+// TestInvalidGetLogsRequest tests invalid getLogs requests
 func TestInvalidGetLogsRequest(t *testing.T) {
 	t.Parallel()

@@ -23,6 +23,7 @@ import (
 	"fmt"
 	"math"
 	"math/big"
+	"slices"
 	"sync/atomic"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -30,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rpc"
-	"golang.org/x/exp/slices"
 )
 
 var (

@@ -19,6 +19,7 @@ package gasprice
 
 import (
 	"context"
 	"math/big"
+	"slices"
 	"sync"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -30,7 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/rpc"
-	"golang.org/x/exp/slices"
 )
 
 const sampleNumber = 3 // Number of transactions sampled in a block

@@ -466,9 +466,7 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
 		largeTxs int // Number of large transactions to announce only
 
 		directCount int // Number of transactions sent directly to peers (duplicates included)
-		directPeers int // Number of peers that were sent transactions directly
 		annCount    int // Number of transactions announced across all peers (duplicates included)
-		annPeers    int // Number of peers announced about transactions
 
 		txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly
 		annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce
@@ -525,17 +523,15 @@ func (h *handler) BroadcastTransactions(txs types.Transactions) {
 		}
 	}
 	for peer, hashes := range txset {
-		directPeers++
 		directCount += len(hashes)
 		peer.AsyncSendTransactions(hashes)
 	}
 	for peer, hashes := range annos {
-		annPeers++
 		annCount += len(hashes)
 		peer.AsyncSendPooledTransactionHashes(hashes)
 	}
 	log.Debug("Distributed transactions", "plaintxs", len(txs)-blobTxs-largeTxs, "blobtxs", blobTxs, "largetxs", largeTxs,
-		"bcastpeers", directPeers, "bcastcount", directCount, "annpeers", annPeers, "anncount", annCount)
+		"bcastpeers", len(txset), "bcastcount", directCount, "annpeers", len(annos), "anncount", annCount)
 }
 
 // txBroadcastLoop announces new transactions to connected peers.
// txBroadcastLoop announces new transactions to connected peers. // txBroadcastLoop announces new transactions to connected peers.

@@ -390,7 +390,7 @@ func testTransactionPropagation(t *testing.T, protocol uint) {
 	}
 	// Interconnect all the sink handlers with the source handler
 	for i, sink := range sinks {
-		sink := sink // Closure for gorotuine below
+		sink := sink // Closure for goroutine below
 		sourcePipe, sinkPipe := p2p.MsgPipe()
 		defer sourcePipe.Close()

@@ -63,7 +63,7 @@ func newTestBackend(blocks int) *testBackend {
 	return newTestBackendWithGenerator(blocks, false, nil)
 }
 
-// newTestBackend creates a chain with a number of explicitly defined blocks and
+// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
 // wraps it into a mock backend.
 func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend {
 	var (

@@ -54,4 +54,9 @@ var (
 	// skipStorageHealingGauge is the metric to track how many storages are retrieved
 	// in multiple requests but healing is not necessary.
 	skipStorageHealingGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/noheal", nil)
+
+	// largeStorageDiscardGauge is the metric to track how many chunked storages are
+	// discarded during the snap sync.
+	largeStorageDiscardGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/discard", nil)
+	largeStorageResumedGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/chunk/resume", nil)
 )

@@ -0,0 +1,154 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snap

import (
	"encoding/json"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)

// Legacy sync progress definitions
type legacyStorageTask struct {
	Next common.Hash // Next account to sync in this interval
	Last common.Hash // Last account to sync in this interval
}

type legacyAccountTask struct {
	Next     common.Hash                          // Next account to sync in this interval
	Last     common.Hash                          // Last account to sync in this interval
	SubTasks map[common.Hash][]*legacyStorageTask // Storage intervals needing fetching for large contracts
}

type legacyProgress struct {
	Tasks []*legacyAccountTask // The suspended account tasks (contract tasks within)
}

func compareProgress(a legacyProgress, b SyncProgress) bool {
	if len(a.Tasks) != len(b.Tasks) {
		return false
	}
	for i := 0; i < len(a.Tasks); i++ {
		if a.Tasks[i].Next != b.Tasks[i].Next {
			return false
		}
		if a.Tasks[i].Last != b.Tasks[i].Last {
			return false
		}
		// new fields are not checked here
		if len(a.Tasks[i].SubTasks) != len(b.Tasks[i].SubTasks) {
			return false
		}
		for addrHash, subTasksA := range a.Tasks[i].SubTasks {
			subTasksB, ok := b.Tasks[i].SubTasks[addrHash]
			if !ok || len(subTasksB) != len(subTasksA) {
				return false
			}
			for j := 0; j < len(subTasksA); j++ {
				if subTasksA[j].Next != subTasksB[j].Next {
					return false
				}
				if subTasksA[j].Last != subTasksB[j].Last {
					return false
				}
			}
		}
	}
	return true
}

func makeLegacyProgress() legacyProgress {
	return legacyProgress{
		Tasks: []*legacyAccountTask{
			{
				Next: common.Hash{},
				Last: common.Hash{0x77},
				SubTasks: map[common.Hash][]*legacyStorageTask{
					common.Hash{0x1}: {
						{
							Next: common.Hash{},
							Last: common.Hash{0xff},
						},
					},
				},
			},
			{
				Next: common.Hash{0x88},
				Last: common.Hash{0xff},
			},
		},
	}
}

func convertLegacy(legacy legacyProgress) SyncProgress {
	var progress SyncProgress
	for i, task := range legacy.Tasks {
		subTasks := make(map[common.Hash][]*storageTask)
		for owner, list := range task.SubTasks {
			var cpy []*storageTask
			for i := 0; i < len(list); i++ {
				cpy = append(cpy, &storageTask{
					Next: list[i].Next,
					Last: list[i].Last,
				})
			}
			subTasks[owner] = cpy
		}
		accountTask := &accountTask{
			Next:     task.Next,
			Last:     task.Last,
			SubTasks: subTasks,
		}
		if i == 0 {
			accountTask.StorageCompleted = []common.Hash{{0xaa}, {0xbb}} // fulfill new fields
		}
		progress.Tasks = append(progress.Tasks, accountTask)
	}
	return progress
}

func TestSyncProgressCompatibility(t *testing.T) {
	// Decode serialized bytes of legacy progress, backward compatibility
	legacy := makeLegacyProgress()
	blob, err := json.Marshal(legacy)
	if err != nil {
		t.Fatalf("Failed to marshal progress %v", err)
	}
	var dec SyncProgress
	if err := json.Unmarshal(blob, &dec); err != nil {
		t.Fatalf("Failed to unmarshal progress %v", err)
	}
	if !compareProgress(legacy, dec) {
		t.Fatal("sync progress is not backward compatible")
	}
	// Decode serialized bytes of new format progress
	progress := convertLegacy(legacy)
	blob, err = json.Marshal(progress)
	if err != nil {
		t.Fatalf("Failed to marshal progress %v", err)
	}
	var legacyDec legacyProgress
	if err := json.Unmarshal(blob, &legacyDec); err != nil {
		t.Fatalf("Failed to unmarshal progress %v", err)
	}
	if !compareProgress(legacyDec, progress) {
		t.Fatal("sync progress is not forward compatible")
	}
}

Some files were not shown because too many files have changed in this diff.
