Merge branch 'master' into ulerdogan-secp256r1

pull/30043/head
Ulaş Erdoğan 3 months ago
commit e7ebe92f27
Files changed (number of changed lines in parentheses):
  1. .github/CODEOWNERS (1)
  2. .github/workflows/go.yml (5)
  3. .golangci.yml (19)
  4. .mailmap (1)
  5. .travis.yml (74)
  6. Dockerfile (4)
  7. Dockerfile.alltools (4)
  8. Makefile (21)
  9. README.md (6)
  10. accounts/abi/abi.go (2)
  11. accounts/abi/argument.go (2)
  12. accounts/abi/bind/auth.go (2)
  13. accounts/abi/bind/backends/simulated.go (4)
  14. accounts/abi/bind/base.go (7)
  15. accounts/abi/bind/bind_test.go (100)
  16. accounts/abi/bind/util_test.go (7)
  17. accounts/abi/reflect.go (4)
  18. accounts/abi/type.go (9)
  19. accounts/abi/type_test.go (2)
  20. accounts/accounts.go (2)
  21. accounts/external/backend.go (15)
  22. accounts/keystore/account_cache.go (2)
  23. accounts/keystore/account_cache_test.go (15)
  24. accounts/keystore/keystore.go (18)
  25. accounts/keystore/keystore_test.go (40)
  26. accounts/scwallet/hub.go (3)
  27. accounts/scwallet/securechannel.go (5)
  28. accounts/scwallet/wallet.go (12)
  29. accounts/usbwallet/hub.go (8)
  30. accounts/usbwallet/ledger.go (2)
  31. accounts/usbwallet/trezor/trezor.go (2)
  32. accounts/usbwallet/wallet.go (6)
  33. appveyor.yml (2)
  34. beacon/blsync/block_sync.go (163)
  35. beacon/blsync/block_sync_test.go (160)
  36. beacon/blsync/client.go (115)
  37. beacon/blsync/config.go (129)
  38. beacon/blsync/engineclient.go (150)
  39. beacon/engine/types.go (43)
  40. beacon/light/api/api_server.go (114)
  41. beacon/light/api/light_api.go (578)
  42. beacon/light/committee_chain.go (25)
  43. beacon/light/committee_chain_test.go (4)
  44. beacon/light/head_tracker.go (161)
  45. beacon/light/request/scheduler.go (403)
  46. beacon/light/request/scheduler_test.go (126)
  47. beacon/light/request/server.go (451)
  48. beacon/light/request/server_test.go (182)
  49. beacon/light/sync/head_sync.go (202)
  50. beacon/light/sync/head_sync_test.go (183)
  51. beacon/light/sync/test_helpers.go (259)
  52. beacon/light/sync/types.go (47)
  53. beacon/light/sync/update_sync.go (398)
  54. beacon/light/sync/update_sync_test.go (219)
  55. beacon/params/params.go (2)
  56. beacon/types/beacon_block.go (110)
  57. beacon/types/beacon_block_test.go (77)
  58. beacon/types/config.go (38)
  59. beacon/types/exec_header.go (80)
  60. beacon/types/exec_payload.go (141)
  61. beacon/types/header.go (11)
  62. beacon/types/light_sync.go (100)
  63. beacon/types/testdata/block_capella.json (1703)
  64. beacon/types/testdata/block_deneb.json (2644)
  65. build/checksums.txt (138)
  66. build/ci.go (57)
  67. build/deb/ethereum/deb.control (2)
  68. build/deb/ethereum/deb.rules (7)
  69. build/update-license.go (7)
  70. cmd/blsync/main.go (122)
  71. cmd/clef/README.md (2)
  72. cmd/clef/main.go (2)
  73. cmd/clef/rules.md (24)
  74. cmd/devp2p/discv4cmd.go (57)
  75. cmd/devp2p/dns_route53.go (2)
  76. cmd/devp2p/enrcmd.go (4)
  77. cmd/devp2p/internal/ethtest/chain.go (12)
  78. cmd/devp2p/internal/ethtest/conn.go (5)
  79. cmd/devp2p/internal/ethtest/engine.go (4)
  80. cmd/devp2p/internal/ethtest/snap.go (7)
  81. cmd/devp2p/internal/ethtest/suite.go (108)
  82. cmd/devp2p/internal/ethtest/suite_test.go (4)
  83. cmd/devp2p/internal/ethtest/transaction.go (29)
  84. cmd/devp2p/internal/v4test/framework.go (10)
  85. cmd/devp2p/internal/v5test/discv5tests.go (3)
  86. cmd/devp2p/internal/v5test/framework.go (9)
  87. cmd/devp2p/main.go (10)
  88. cmd/devp2p/nodeset.go (2)
  89. cmd/devp2p/nodesetcmd.go (6)
  90. cmd/devp2p/rlpxcmd.go (8)
  91. cmd/era/main.go (325)
  92. cmd/ethkey/README.md (2)
  93. cmd/evm/blockrunner.go (4)
  94. cmd/evm/internal/t8ntool/block.go (4)
  95. cmd/evm/internal/t8ntool/execution.go (60)
  96. cmd/evm/internal/t8ntool/flags.go (4)
  97. cmd/evm/internal/t8ntool/tracewriter.go (81)
  98. cmd/evm/internal/t8ntool/transaction.go (2)
  99. cmd/evm/internal/t8ntool/transition.go (55)
  100. cmd/evm/internal/t8ntool/tx_iterator.go (4)
Some files were not shown because too many files have changed in this diff.

@@ -10,6 +10,7 @@ core/ @karalabe @holiman @rjl493456442
 eth/ @karalabe @holiman @rjl493456442
 eth/catalyst/ @gballet
 eth/tracers/ @s1na
+core/tracing/ @s1na
 graphql/ @s1na
 les/ @zsfelfoldi @rjl493456442
 light/ @zsfelfoldi @rjl493456442

@@ -11,11 +11,12 @@ jobs:
   build:
     runs-on: self-hosted
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
        with:
          go-version: 1.21.4
+         cache: false
      - name: Run tests
        run: go test -short ./...
        env:

@@ -6,8 +6,6 @@ run:
   # default is true. Enables skipping of directories:
   # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
   skip-dirs-use-default: true
-  skip-files:
-    - core/genesis_alloc.go

 linters:
   disable-all: true
@@ -25,7 +23,10 @@ linters:
     - durationcheck
     - exportloopref
     - whitespace
+    - revive # only certain checks enabled

+  ### linters we tried and will not be using:
+  ###
   # - structcheck # lots of false positives
   # - errcheck #lot of false positives
   # - contextcheck
@@ -38,13 +39,27 @@ linters:
 linters-settings:
   gofmt:
     simplify: true
+  revive:
+    enable-all-rules: false
+    # here we enable specific useful rules
+    # see https://golangci-lint.run/usage/linters/#revive for supported rules
+    rules:
+      - name: receiver-naming
+        severity: warning
+        disabled: false
+        exclude: [""]

 issues:
+  exclude-files:
+    - core/genesis_alloc.go
   exclude-rules:
     - path: crypto/bn256/cloudflare/optate.go
       linters:
         - deadcode
         - staticcheck
+    - path: crypto/bn256/
+      linters:
+        - revive
     - path: internal/build/pgp.go
       text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
     - path: core/vm/contracts.go

@@ -56,7 +56,6 @@ Diederik Loerakker <proto@protolambda.com>
 Dimitry Khokhlov <winsvega@mail.ru>
 Domino Valdano <dominoplural@gmail.com>
-Domino Valdano <dominoplural@gmail.com> <jeff@okcupid.com>
 Edgar Aroutiounian <edgar.factorial@gmail.com>

@ -15,8 +15,8 @@ jobs:
if: type = push if: type = push
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
env: env:
- docker - docker
services: services:
@ -32,8 +32,8 @@ jobs:
if: type = push if: type = push
os: linux os: linux
arch: arm64 arch: arm64
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
env: env:
- docker - docker
services: services:
@ -49,21 +49,20 @@ jobs:
- stage: build - stage: build
if: type = push if: type = push
os: linux os: linux
dist: bionic dist: noble
sudo: required sudo: required
go: 1.21.x go: 1.22.x
env: env:
- azure-linux - azure-linux
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
addons:
apt:
packages:
- gcc-multilib
script: script:
# Build for the primary platforms that Trusty can manage # build amd64
- go run build/ci.go install -dlgo - go run build/ci.go install -dlgo
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
# build 386
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends install gcc-multilib
- go run build/ci.go install -dlgo -arch 386 - go run build/ci.go install -dlgo -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
@ -85,7 +84,7 @@ jobs:
if: type = push if: type = push
os: osx os: osx
osx_image: xcode14.2 osx_image: xcode14.2
go: 1.21.x go: 1.22.x
env: env:
- azure-osx - azure-osx
git: git:
@ -98,48 +97,34 @@ jobs:
# These builders run the tests # These builders run the tests
- stage: build - stage: build
if: type = push
os: linux os: linux
arch: amd64 arch: amd64
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
script:
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES
- stage: build
if: type = pull_request
os: linux
arch: arm64
dist: bionic
go: 1.20.x
script: script:
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES - travis_wait 45 go run build/ci.go test $TEST_PACKAGES
- stage: build - stage: build
if: type = push
os: linux os: linux
dist: bionic dist: noble
go: 1.20.x go: 1.21.x
script: script:
- travis_wait 30 go run build/ci.go test $TEST_PACKAGES - travis_wait 45 go run build/ci.go test $TEST_PACKAGES
# This builder does the Ubuntu PPA nightly uploads # This builder does the Ubuntu PPA nightly uploads
- stage: build - stage: build
if: type = cron || (type = push && tag ~= /^v[0-9]/) if: type = cron || (type = push && tag ~= /^v[0-9]/)
os: linux os: linux
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
env: env:
- ubuntu-ppa - ubuntu-ppa
git: git:
submodules: false # avoid cloning ethereum/tests submodules: false # avoid cloning ethereum/tests
addons: before_install:
apt: - sudo -E apt-get -yq --no-install-suggests --no-install-recommends install devscripts debhelper dput fakeroot
packages:
- devscripts
- debhelper
- dput
- fakeroot
- python-bzrlib
- python-paramiko
script: script:
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
@ -148,8 +133,8 @@ jobs:
- stage: build - stage: build
if: type = cron if: type = cron
os: linux os: linux
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
env: env:
- azure-purge - azure-purge
git: git:
@ -161,8 +146,9 @@ jobs:
- stage: build - stage: build
if: type = cron if: type = cron
os: linux os: linux
dist: bionic dist: noble
go: 1.21.x go: 1.22.x
env:
- racetests
script: script:
- travis_wait 30 go run build/ci.go test -race $TEST_PACKAGES - travis_wait 60 go run build/ci.go test -race $TEST_PACKAGES

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""

 # Build Geth in a stock Go builder container
-FROM golang:1.21-alpine as builder
+FROM golang:1.22-alpine as builder

 RUN apk add --no-cache gcc musl-dev linux-headers git
@@ -25,7 +25,7 @@ COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
 EXPOSE 8545 8546 30303 30303/udp
 ENTRYPOINT ["geth"]

-# Add some metadata labels to help programatic image consumption
+# Add some metadata labels to help programmatic image consumption
 ARG COMMIT=""
 ARG VERSION=""
 ARG BUILDNUM=""

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""

 # Build Geth in a stock Go builder container
-FROM golang:1.21-alpine as builder
+FROM golang:1.22-alpine as builder

 RUN apk add --no-cache gcc musl-dev linux-headers git
@@ -24,7 +24,7 @@ COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
 EXPOSE 8545 8546 30303 30303/udp

-# Add some metadata labels to help programatic image consumption
+# Add some metadata labels to help programmatic image consumption
 ARG COMMIT=""
 ARG VERSION=""
 ARG BUILDNUM=""

@@ -2,26 +2,35 @@
 # with Go source code. If you know what GOPATH is then you probably
 # don't need to bother with make.

-.PHONY: geth android ios evm all test clean
+.PHONY: geth all test lint fmt clean devtools help

 GOBIN = ./build/bin
 GO ?= latest
 GORUN = go run

+#? geth: Build geth.
 geth:
 	$(GORUN) build/ci.go install ./cmd/geth
 	@echo "Done building."
 	@echo "Run \"$(GOBIN)/geth\" to launch geth."

+#? all: Build all packages and executables.
 all:
 	$(GORUN) build/ci.go install

+#? test: Run the tests.
 test: all
 	$(GORUN) build/ci.go test

+#? lint: Run certain pre-selected linters.
 lint: ## Run linters.
 	$(GORUN) build/ci.go lint

+#? fmt: Ensure consistent code formatting.
+fmt:
+	gofmt -s -w $(shell find . -name "*.go")
+
+#? clean: Clean go cache, built executables, and the auto generated folder.
 clean:
 	go clean -cache
 	rm -fr build/_workspace/pkg/ $(GOBIN)/*
@@ -29,6 +38,7 @@ clean:

 # The devtools target installs tools required for 'go generate'.
 # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
+#? devtools: Install recommended developer tools.
 devtools:
 	env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
 	env GOBIN= go install github.com/fjl/gencodec@latest
@@ -36,3 +46,12 @@ devtools:
 	env GOBIN= go install ./cmd/abigen
 	@type "solc" 2> /dev/null || echo 'Please install solc'
 	@type "protoc" 2> /dev/null || echo 'Please install protoc'
+
+#? help: Get more info on make commands.
+help: Makefile
+	@echo ''
+	@echo 'Usage:'
+	@echo ' make [target]'
+	@echo ''
+	@echo 'Targets:'
+	@sed -n 's/^#?//p' $< | column -t -s ':' | sort | sed -e 's/^/ /'

@@ -1,12 +1,12 @@
 ## Go Ethereum

-Official Golang execution layer implementation of the Ethereum protocol.
+Golang execution layer implementation of the Ethereum protocol.

 [![API Reference](
 https://pkg.go.dev/badge/github.com/ethereum/go-ethereum
 )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
 [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
-[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
+[![Travis](https://app.travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://app.travis-ci.com/github/ethereum/go-ethereum)
 [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)

 Automated builds are available for stable releases and the unstable master branch. Binary
@@ -16,7 +16,7 @@ archives are published at https://geth.ethereum.org/downloads/.

 For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth).

-Building `geth` requires both a Go (version 1.19 or later) and a C compiler. You can install
+Building `geth` requires both a Go (version 1.21 or later) and a C compiler. You can install
 them using your favourite package manager. Once the dependencies are installed, run

 ```shell

@@ -29,7 +29,7 @@ import (
 )

 // The ABI holds information about a contract's context and available
-// invokable methods. It will allow you to type check function calls and
+// invocable methods. It will allow you to type check function calls and
 // packs data accordingly.
 type ABI struct {
 	Constructor Method
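
For context on this type, a minimal sketch of how an ABI is typically parsed and used to pack call data. The JSON definition, method name and argument values below are made-up examples, not taken from this diff:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical single-method ABI used only for illustration.
	const def = `[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"amount","type":"uint256"}]}]`

	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Pack type-checks the arguments against the method signature and
	// returns the call data (4-byte selector followed by encoded args).
	data, err := parsed.Pack("transfer",
		common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
		big.NewInt(1))
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: %x\n", data)
}
```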

@@ -127,7 +127,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
 	return arguments.copyAtomic(v, values[0])
 }

-// unpackAtomic unpacks ( hexdata -> go ) a single value
+// copyAtomic copies ( hexdata -> go ) a single value
 func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
 	dst := reflect.ValueOf(v).Elem()
 	src := reflect.ValueOf(marshalledValues)

@@ -142,10 +142,10 @@ func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accou
 // NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer
 // from a single private key.
 func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) {
-	keyAddr := crypto.PubkeyToAddress(key.PublicKey)
 	if chainID == nil {
 		return nil, ErrNoChainID
 	}
+	keyAddr := crypto.PubkeyToAddress(key.PublicKey)
 	signer := types.LatestSignerForChainID(chainID)
 	return &TransactOpts{
 		From: keyAddr,
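
A usage sketch of this constructor; the chain ID 1337 is just an example value. The reordering above only moves the address derivation behind the nil check, the call pattern is unchanged:

```go
package main

import (
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Passing a nil chain ID fails before any address derivation is done:
	// bind.NewKeyedTransactorWithChainID(key, nil) returns bind.ErrNoChainID.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("transactor address:", auth.From)
}
```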

@@ -20,7 +20,7 @@ import (
 	"context"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethclient/simulated"
 )
@@ -43,7 +43,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parentHash common.Hash) err
 //
 // Deprecated: please use simulated.Backend from package
 // github.com/ethereum/go-ethereum/ethclient/simulated instead.
-func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
+func NewSimulatedBackend(alloc types.GenesisAlloc, gasLimit uint64) *SimulatedBackend {
 	b := simulated.NewBackend(alloc, simulated.WithBlockGasLimit(gasLimit))
 	return &SimulatedBackend{
 		Backend: b,
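
Since this wrapper is deprecated anyway, a sketch of the non-deprecated path with the GenesisAlloc type now living in core/types. The balance and gas limit values are arbitrary placeholders:

```go
package main

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	// GenesisAlloc moved from core to core/types; the allocation shape is unchanged.
	backend := simulated.NewBackend(
		types.GenesisAlloc{addr: {Balance: big.NewInt(1e18)}},
		simulated.WithBlockGasLimit(10_000_000),
	)
	defer backend.Close()

	_ = backend.Client() // ethclient-style API against the simulated chain
}
```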

@@ -461,7 +461,7 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
 	if err != nil {
 		return nil, nil, err
 	}
-	sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
+	sub := event.NewSubscription(func(quit <-chan struct{}) error {
 		for _, log := range buff {
 			select {
 			case logs <- log:
@@ -470,11 +470,8 @@ func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]int
 			}
 		}
 		return nil
-	}), nil
-	if err != nil {
-		return nil, nil, err
-	}
+	})
 	return logs, sub, nil
 }
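
event.NewSubscription cannot fail at construction time, which is why the error plumbing above could be dropped. A standalone sketch of the producer pattern it wraps; the channel and loop bounds are illustrative:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	ch := make(chan int)

	// The producer runs in its own goroutine; returning ends the subscription,
	// and quit is closed when Unsubscribe is called.
	sub := event.NewSubscription(func(quit <-chan struct{}) error {
		for i := 0; i < 3; i++ {
			select {
			case ch <- i:
			case <-quit:
				return nil
			}
		}
		return nil
	})
	defer sub.Unsubscribe()

	for i := 0; i < 3; i++ {
		fmt.Println(<-ch)
	}
}
```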

@ -289,7 +289,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -297,7 +297,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy an interaction tester contract and call a transaction on it // Deploy an interaction tester contract and call a transaction on it
@ -345,7 +345,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -353,7 +353,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it // Deploy a tuple tester contract and execute a structured call on it
@ -391,7 +391,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -399,7 +399,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tuple tester contract and execute a structured call on it // Deploy a tuple tester contract and execute a structured call on it
@ -449,7 +449,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -457,7 +457,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a slice tester contract and execute a n array call on it // Deploy a slice tester contract and execute a n array call on it
@ -497,7 +497,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -505,7 +505,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a default method invoker contract and execute its default method // Deploy a default method invoker contract and execute its default method
@ -564,7 +564,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -572,7 +572,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a structs method invoker contract and execute its default method // Deploy a structs method invoker contract and execute its default method
@ -610,12 +610,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
`, `,
` `
// Create a simulator and wrap a non-deployed contract // Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000)) sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
defer sim.Close() defer sim.Close()
nonexistent, err := NewNonExistent(common.Address{}, sim) nonexistent, err := NewNonExistent(common.Address{}, sim)
@ -649,12 +649,12 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
`, `,
` `
// Create a simulator and wrap a non-deployed contract // Create a simulator and wrap a non-deployed contract
sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000)) sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000))
defer sim.Close() defer sim.Close()
nonexistent, err := NewNonExistentStruct(common.Address{}, sim) nonexistent, err := NewNonExistentStruct(common.Address{}, sim)
@ -696,7 +696,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -704,7 +704,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a funky gas pattern contract // Deploy a funky gas pattern contract
@ -746,7 +746,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -754,7 +754,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a sender tester contract and execute a structured call on it // Deploy a sender tester contract and execute a structured call on it
@ -821,7 +821,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -829,7 +829,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a underscorer tester contract and execute a structured call on it // Deploy a underscorer tester contract and execute a structured call on it
@ -915,7 +915,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -923,7 +923,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy an eventer contract // Deploy an eventer contract
@ -1105,7 +1105,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1113,7 +1113,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
//deploy the test contract //deploy the test contract
@ -1240,7 +1240,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
@ -1248,7 +1248,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
_, _, contract, err := DeployTuple(auth, sim) _, _, contract, err := DeployTuple(auth, sim)
@ -1382,7 +1382,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1390,7 +1390,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
//deploy the test contract //deploy the test contract
@ -1448,14 +1448,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
// Initialize test accounts // Initialize test accounts
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// deploy the test contract // deploy the test contract
@ -1537,7 +1537,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
`, `,
` `
// Initialize test accounts // Initialize test accounts
@ -1545,7 +1545,7 @@ var bindTests = []struct {
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract // Deploy registrar contract
sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1600,14 +1600,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
`, `,
` `
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
// Deploy registrar contract // Deploy registrar contract
sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1661,7 +1661,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
@ -1669,7 +1669,7 @@ var bindTests = []struct {
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000)
defer sim.Close() defer sim.Close()
// Deploy a tester contract and execute a structured call on it // Deploy a tester contract and execute a structured call on it
@ -1722,14 +1722,14 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
`, `,
` `
key, _ := crypto.GenerateKey() key, _ := crypto.GenerateKey()
addr := crypto.PubkeyToAddress(key.PublicKey) addr := crypto.PubkeyToAddress(key.PublicKey)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000)
defer sim.Close() defer sim.Close()
opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
@ -1810,7 +1810,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1818,7 +1818,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1881,7 +1881,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1889,7 +1889,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1934,7 +1934,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1942,7 +1942,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -1983,7 +1983,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -1991,7 +1991,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
defer sim.Close() defer sim.Close()
@ -2024,7 +2024,7 @@ var bindTests = []struct {
"github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/ethconfig"
`, `,
@ -2032,7 +2032,7 @@ var bindTests = []struct {
var ( var (
key, _ = crypto.GenerateKey() key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
) )
_, tx, _, err := DeployRangeKeyword(user, sim) _, tx, _, err := DeployRangeKeyword(user, sim)
if err != nil { if err != nil {

@@ -25,7 +25,6 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/abi/bind"
 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethclient/simulated"
@@ -57,7 +56,7 @@ func TestWaitDeployed(t *testing.T) {
 	t.Parallel()
 	for name, test := range waitDeployedTests {
 		backend := simulated.NewBackend(
-			core.GenesisAlloc{
+			types.GenesisAlloc{
 				crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
 			},
 		)
@@ -65,7 +64,7 @@ func TestWaitDeployed(t *testing.T) {
 		// Create the transaction
 		head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough
-		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1))
+		gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))
 		tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code))
 		tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey)
@@ -102,7 +101,7 @@ func TestWaitDeployed(t *testing.T) {
 func TestWaitDeployedCornerCases(t *testing.T) {
 	backend := simulated.NewBackend(
-		core.GenesisAlloc{
+		types.GenesisAlloc{
 			crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)},
 		},
 	)
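
These tests exercise bind.WaitDeployed against the simulated backend. A condensed, self-contained sketch of the same deploy-and-wait pattern; the init-code hex, gas limit and balance below are illustrative placeholders, not values taken from the test table:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethclient/simulated"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	key, _ := crypto.GenerateKey()
	backend := simulated.NewBackend(types.GenesisAlloc{
		crypto.PubkeyToAddress(key.PublicKey): {Balance: big.NewInt(params.Ether)},
	})
	defer backend.Close()
	client := backend.Client()

	// Price the deployment one gwei above the current base fee, as above.
	head, _ := client.HeaderByNumber(context.Background(), nil)
	gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei))

	// Tiny init code that deploys a 10-byte runtime blob (placeholder).
	code := common.FromHex("6060604052600a8060106000396000f360606040526008565b00")
	tx := types.NewContractCreation(0, big.NewInt(0), 300000, gasPrice, code)
	tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), key)

	if err := client.SendTransaction(context.Background(), tx); err != nil {
		log.Fatal(err)
	}
	backend.Commit() // mine a block so the receipt exists

	addr, err := bind.WaitDeployed(context.Background(), client, tx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("contract deployed at", addr)
}
```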

@@ -24,7 +24,7 @@ import (
 	"strings"
 )

-// ConvertType converts an interface of a runtime type into a interface of the
+// ConvertType converts an interface of a runtime type into an interface of the
 // given type, e.g. turn this code:
 //
 // var fields []reflect.StructField
@@ -33,7 +33,7 @@ import (
 // Name: "X",
 // Type: reflect.TypeOf(new(big.Int)),
 // Tag: reflect.StructTag("json:\"" + "x" + "\""),
-// }
+// })
 //
 // into:
 //
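
The doc comment being corrected here describes building an anonymous struct type at runtime. A self-contained sketch of that pattern with the standard reflect package, mirroring the field shown in the comment:

```go
package main

import (
	"fmt"
	"math/big"
	"reflect"
)

func main() {
	var fields []reflect.StructField
	fields = append(fields, reflect.StructField{
		Name: "X",
		Type: reflect.TypeOf(new(big.Int)),
		Tag:  reflect.StructTag(`json:"x"`),
	})

	// Equivalent to declaring: struct { X *big.Int `json:"x"` }
	structType := reflect.StructOf(fields)
	value := reflect.New(structType).Elem()
	value.Field(0).Set(reflect.ValueOf(big.NewInt(42)))

	fmt.Println(value.Interface())
}
```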

@@ -64,6 +64,9 @@ type Type struct {
 var (
 	// typeRegex parses the abi sub types
 	typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
+
+	// sliceSizeRegex grab the slice size
+	sliceSizeRegex = regexp.MustCompile("[0-9]+")
 )

 // NewType creates a new reflection type of abi type given in t.
@@ -91,8 +94,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 		// grab the last cell and create a type from there
 		sliced := t[i:]
 		// grab the slice size with regexp
-		re := regexp.MustCompile("[0-9]+")
-		intz := re.FindAllString(sliced, -1)
+		intz := sliceSizeRegex.FindAllString(sliced, -1)
 		if len(intz) == 0 {
 			// is a slice
@@ -179,9 +181,6 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty
 			return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
 		}
 		fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] })
-		if err != nil {
-			return Type{}, err
-		}
 		used[fieldName] = true
 		if !isValidFieldName(fieldName) {
 			return Type{}, fmt.Errorf("field %d has invalid name", idx)
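
The first hunk hoists the regexp compilation out of NewType into a package-level variable. A generic sketch of why that matters; the names and input string here are illustrative, not from the file:

```go
package main

import (
	"fmt"
	"regexp"
)

// Compiling once at package init avoids re-running regexp.MustCompile on
// every call, which is what the hot path in NewType previously did.
var sizeRe = regexp.MustCompile("[0-9]+")

// sliceSizes extracts the numeric dimensions from an ABI array suffix.
func sliceSizes(suffix string) []string {
	return sizeRe.FindAllString(suffix, -1)
}

func main() {
	fmt.Println(sliceSizes("[3][2]")) // [3 2]
}
```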

@@ -25,7 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 )

-// typeWithoutStringer is a alias for the Type type which simply doesn't implement
+// typeWithoutStringer is an alias for the Type type which simply doesn't implement
 // the stringer interface to allow printing type details in the tests below.
 type typeWithoutStringer Type

@@ -195,7 +195,7 @@ func TextHash(data []byte) []byte {
 //
 // This gives context to the signed message and prevents signing of transactions.
 func TextAndHash(data []byte) ([]byte, string) {
-	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), string(data))
+	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
 	hasher := sha3.NewLegacyKeccak256()
 	hasher.Write([]byte(msg))
 	return hasher.Sum(nil), msg
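
The change simply passes the byte slice straight to %s, which fmt formats the same way as the string conversion did. A small standalone sketch reproducing the same prefix-and-hash scheme shown above:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// textAndHash mirrors the function above: prefix the message with
// "\x19Ethereum Signed Message:\n" plus its length, then keccak256 it.
func textAndHash(data []byte) ([]byte, string) {
	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write([]byte(msg))
	return hasher.Sum(nil), msg
}

func main() {
	hash, msg := textAndHash([]byte("hello"))
	fmt.Printf("%q -> %x\n", msg, hash)
}
```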

@@ -205,7 +205,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
 		to = &t
 	}
 	args := &apitypes.SendTxArgs{
-		Data:  &data,
+		Input: &data,
 		Nonce: hexutil.Uint64(tx.Nonce()),
 		Value: hexutil.Big(*tx.Value()),
 		Gas:   hexutil.Uint64(tx.Gas()),
@@ -215,7 +215,7 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
 	switch tx.Type() {
 	case types.LegacyTxType, types.AccessListTxType:
 		args.GasPrice = (*hexutil.Big)(tx.GasPrice())
-	case types.DynamicFeeTxType:
+	case types.DynamicFeeTxType, types.BlobTxType:
 		args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
 		args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
 	default:
@@ -235,6 +235,17 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio
 		accessList := tx.AccessList()
 		args.AccessList = &accessList
 	}
+	if tx.Type() == types.BlobTxType {
+		args.BlobHashes = tx.BlobHashes()
+		sidecar := tx.BlobTxSidecar()
+		if sidecar == nil {
+			return nil, errors.New("blobs must be present for signing")
+		}
+		args.Blobs = sidecar.Blobs
+		args.Commitments = sidecar.Commitments
+		args.Proofs = sidecar.Proofs
+	}
+
 	var res signTransactionResult
 	if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
 		return nil, err
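
For context on the added branch, a small sketch (the package and function names are illustrative) of the invariant it enforces: a blob transaction must still carry its sidecar when it reaches the external signer:

```go
package signerutil

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// checkBlobSidecar mirrors the added logic: blob transactions expose their
// versioned hashes via BlobHashes() and the full sidecar (blobs, KZG
// commitments and proofs) via BlobTxSidecar(); without the sidecar the
// signer cannot produce a complete signed transaction.
func checkBlobSidecar(tx *types.Transaction) error {
	if tx.Type() != types.BlobTxType {
		return nil
	}
	sidecar := tx.BlobTxSidecar()
	if sidecar == nil {
		return errors.New("blobs must be present for signing")
	}
	fmt.Printf("hashes=%d blobs=%d commitments=%d proofs=%d\n",
		len(tx.BlobHashes()), len(sidecar.Blobs), len(sidecar.Commitments), len(sidecar.Proofs))
	return nil
}
```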

@ -22,6 +22,7 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"slices"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -31,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"golang.org/x/exp/slices"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does

@ -23,6 +23,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"slices"
"testing" "testing"
"time" "time"
@ -30,7 +31,6 @@ import (
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"golang.org/x/exp/slices"
)
var (
@ -51,7 +51,7 @@ var (
}
)
// waitWatcherStart waits up to 1s for the keystore watcher to start.
func waitWatcherStart(ks *KeyStore) bool {
// On systems where file watch is not supported, just return "ok".
if !ks.cache.watcher.enabled() {
@ -86,7 +86,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
func TestWatchNewFile(t *testing.T) {
t.Parallel()
dir, ks := tmpKeyStore(t)
// Ensure the watcher is started before adding any files.
ks.Accounts()
@ -326,6 +326,11 @@ func TestUpdatedKeyfileContents(t *testing.T) {
// Create a temporary keystore to test with
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
// Create the directory
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
@ -335,9 +340,7 @@ func TestUpdatedKeyfileContents(t *testing.T) {
if !waitWatcherStart(ks) {
t.Fatal("keystore watcher didn't start in time")
}
// Copy a key file into it
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa")
// Place one of our testfiles in there

@ -87,15 +87,6 @@ func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
return ks
}
// NewPlaintextKeyStore creates a keystore for the given directory.
// Deprecated: Use NewKeyStore.
func NewPlaintextKeyStore(keydir string) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePlain{keydir}}
ks.init(keydir)
return ks
}
func (ks *KeyStore) init(keydir string) {
// Lock the mutex since the account cache might call back with events
ks.mu.Lock()
@ -321,11 +312,10 @@ func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error {
// Lock removes the private key with the given address from memory.
func (ks *KeyStore) Lock(addr common.Address) error {
ks.mu.Lock()
unl, found := ks.unlocked[addr]
ks.mu.Unlock()
if found {
ks.expire(addr, unl, time.Duration(0)*time.Nanosecond)
} else {
ks.mu.Unlock()
}
return nil
}
@ -509,7 +499,5 @@ func (ks *KeyStore) isUpdating() bool {
// zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) {
b := k.D.Bits()
clear(b)
b[i] = 0
}
}

@ -20,6 +20,7 @@ import (
"math/rand" "math/rand"
"os" "os"
"runtime" "runtime"
"slices"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -30,14 +31,13 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"golang.org/x/exp/slices"
)
var testSigData = make([]byte, 32)
func TestKeyStore(t *testing.T) {
t.Parallel()
dir, ks := tmpKeyStore(t)
a, err := ks.NewAccount("foo")
if err != nil {
@ -72,7 +72,7 @@ func TestKeyStore(t *testing.T) {
func TestSign(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
pass := "" // not used but required by API
a1, err := ks.NewAccount(pass)
@ -89,7 +89,7 @@ func TestSign(t *testing.T) {
func TestSignWithPassphrase(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
pass := "passwd"
acc, err := ks.NewAccount(pass)
@ -117,7 +117,7 @@ func TestSignWithPassphrase(t *testing.T) {
func TestTimedUnlock(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
pass := "foo"
a1, err := ks.NewAccount(pass)
@ -152,7 +152,7 @@ func TestTimedUnlock(t *testing.T) {
func TestOverrideUnlock(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
pass := "foo"
a1, err := ks.NewAccount(pass)
@ -193,7 +193,7 @@ func TestOverrideUnlock(t *testing.T) {
// This test should fail under -race if signing races the expiration goroutine.
func TestSignRace(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
// Create a test account.
a1, err := ks.NewAccount("")
@ -238,7 +238,7 @@ func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time
func TestWalletNotifierLifecycle(t *testing.T) {
t.Parallel()
// Create a temporary keystore to test with
_, ks := tmpKeyStore(t)
// Ensure that the notification updater is not running yet
time.Sleep(250 * time.Millisecond)
@ -284,7 +284,7 @@ type walletEvent struct {
// or deleted from the keystore.
func TestWalletNotifications(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
// Subscribe to the wallet feed and collect events.
var (
@ -343,10 +343,10 @@ func TestWalletNotifications(t *testing.T) {
checkEvents(t, wantEvents, events)
}
// TestImportECDSA tests the import functionality of a keystore.
func TestImportECDSA(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
key, err := crypto.GenerateKey()
if err != nil {
t.Fatalf("failed to generate key: %v", key)
@ -362,10 +362,10 @@ func TestImportECDSA(t *testing.T) {
}
}
// TestImportExport tests the import and export functionality of a keystore.
func TestImportExport(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
acc, err := ks.NewAccount("old")
if err != nil {
t.Fatalf("failed to create account: %v", acc)
@ -374,7 +374,7 @@ func TestImportExport(t *testing.T) {
if err != nil {
t.Fatalf("failed to export account: %v", acc)
}
_, ks2 := tmpKeyStore(t)
if _, err = ks2.Import(json, "old", "old"); err == nil {
t.Errorf("importing with invalid password succeeded")
}
@ -394,7 +394,7 @@ func TestImportExport(t *testing.T) {
// This test should fail under -race if importing races.
func TestImportRace(t *testing.T) {
t.Parallel()
_, ks := tmpKeyStore(t)
acc, err := ks.NewAccount("old")
if err != nil {
t.Fatalf("failed to create account: %v", acc)
@ -403,7 +403,7 @@ func TestImportRace(t *testing.T) {
if err != nil {
t.Fatalf("failed to export account: %v", acc)
}
_, ks2 := tmpKeyStore(t)
var atom atomic.Uint32
var wg sync.WaitGroup
wg.Add(2)
@ -457,11 +457,7 @@ func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
}
}
func tmpKeyStore(t *testing.T) (string, *KeyStore) {
d := t.TempDir()
return d, NewKeyStore(d, veryLightScryptN, veryLightScryptP)
if encrypted {
newKs = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) }
}
return d, newKs(d)
}

@ -95,6 +95,7 @@ func (hub *Hub) readPairings() error {
}
return err
}
defer pairingFile.Close()
pairingData, err := io.ReadAll(pairingFile)
if err != nil {
@ -241,7 +242,7 @@ func (hub *Hub) refreshWallets() {
card.Disconnect(pcsc.LeaveCard)
continue
}
// Card connected, start tracking among the wallets
hub.wallets[reader] = wallet
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
}

@ -20,7 +20,6 @@ import (
"bytes" "bytes"
"crypto/aes" "crypto/aes"
"crypto/cipher" "crypto/cipher"
"crypto/elliptic"
"crypto/rand" "crypto/rand"
"crypto/sha256" "crypto/sha256"
"crypto/sha512" "crypto/sha512"
@ -72,11 +71,11 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
if err != nil {
return nil, fmt.Errorf("could not unmarshal public key from card: %v", err)
}
secret, _ := crypto.S256().ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes())
return &SecureChannelSession{
card: card,
secret: secret.Bytes(),
publicKey: crypto.FromECDSAPub(&key.PublicKey),
}, nil
}

@ -73,6 +73,14 @@ var (
DerivationSignatureHash = sha256.Sum256(common.Hash{}.Bytes())
)
var (
// pinRegexp is the regular expression used to validate PIN codes.
pinRegexp = regexp.MustCompile(`^[0-9]{6,}$`)
// pukRegexp is the regular expression used to validate PUK codes.
pukRegexp = regexp.MustCompile(`^[0-9]{12,}$`)
)
// List of APDU command-related constants
const (
claISO7816 = 0
@ -380,7 +388,7 @@ func (w *Wallet) Open(passphrase string) error {
case passphrase == "": case passphrase == "":
return ErrPINUnblockNeeded return ErrPINUnblockNeeded
case status.PinRetryCount > 0: case status.PinRetryCount > 0:
if !regexp.MustCompile(`^[0-9]{6,}$`).MatchString(passphrase) { if !pinRegexp.MatchString(passphrase) {
w.log.Error("PIN needs to be at least 6 digits") w.log.Error("PIN needs to be at least 6 digits")
return ErrPINNeeded return ErrPINNeeded
} }
@ -388,7 +396,7 @@ func (w *Wallet) Open(passphrase string) error {
return err
}
default:
if !pukRegexp.MatchString(passphrase) {
w.log.Error("PUK needs to be at least 12 digits")
return ErrPINUnblockNeeded
}

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/karalabe/usb" "github.com/karalabe/hid"
) )
// LedgerScheme is the protocol scheme prefixing account and wallet URLs. // LedgerScheme is the protocol scheme prefixing account and wallet URLs.
@ -109,7 +109,7 @@ func NewTrezorHubWithWebUSB() (*Hub, error) {
// newHub creates a new hardware wallet manager for generic USB devices.
func newHub(scheme string, vendorID uint16, productIDs []uint16, usageID uint16, endpointID int, makeDriver func(log.Logger) driver) (*Hub, error) {
if !hid.Supported() {
return nil, errors.New("unsupported platform")
}
hub := &Hub{
@ -155,7 +155,7 @@ func (hub *Hub) refreshWallets() {
return
}
// Retrieve the current list of USB wallet devices
var devices []hid.DeviceInfo
if runtime.GOOS == "linux" {
// hidapi on Linux opens the device during enumeration to retrieve some infos,
@ -170,7 +170,7 @@ func (hub *Hub) refreshWallets() {
return
}
}
infos, err := hid.Enumerate(hub.vendorID, 0)
if err != nil {
failcount := hub.enumFails.Add(1)
if runtime.GOOS == "linux" {

@ -16,7 +16,7 @@
// This file contains the implementation for interacting with the Ledger hardware
// wallets. The wire protocol spec can be found in the Ledger Blue GitHub repo:
// https://github.com/LedgerHQ/app-ethereum/blob/develop/doc/ethapp.adoc
package usbwallet

@ -16,7 +16,7 @@
// This file contains the implementation for interacting with the Trezor hardware
// wallets. The wire protocol spec can be found on the SatoshiLabs website:
// https://docs.trezor.io/trezor-firmware/common/message-workflows.html
// !!! STAHP !!!
//

@ -31,7 +31,7 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/karalabe/usb" "github.com/karalabe/hid"
) )
// Maximum time between wallet health checks to detect USB unplugs. // Maximum time between wallet health checks to detect USB unplugs.
@ -79,8 +79,8 @@ type wallet struct {
driver driver // Hardware implementation of the low level device operations
url *accounts.URL // Textual URL uniquely identifying this wallet
info hid.DeviceInfo // Known USB device infos about the wallet
device hid.Device // USB device advertising itself as a hardware wallet
accounts []accounts.Account // List of derive accounts pinned on the hardware wallet
paths map[common.Address]accounts.DerivationPath // Known derivation paths for signing operations

@ -26,7 +26,7 @@ for:
- go run build/ci.go lint
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -short
# linux/386 is disabled.
- matrix:

@ -0,0 +1,163 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
)
// beaconBlockSync implements request.Module; it fetches the beacon blocks belonging
// to the validated and prefetch heads.
type beaconBlockSync struct {
recentBlocks *lru.Cache[common.Hash, *types.BeaconBlock]
locked map[common.Hash]request.ServerAndID
serverHeads map[request.Server]common.Hash
headTracker headTracker
lastHeadInfo types.HeadInfo
chainHeadFeed event.FeedOf[types.ChainHeadEvent]
}
type headTracker interface {
PrefetchHead() types.HeadInfo
ValidatedOptimistic() (types.OptimisticUpdate, bool)
ValidatedFinality() (types.FinalityUpdate, bool)
}
// newBeaconBlockSync returns a new beaconBlockSync.
func newBeaconBlockSync(headTracker headTracker) *beaconBlockSync {
return &beaconBlockSync{
headTracker: headTracker,
recentBlocks: lru.NewCache[common.Hash, *types.BeaconBlock](10),
locked: make(map[common.Hash]request.ServerAndID),
serverHeads: make(map[request.Server]common.Hash),
}
}
func (s *beaconBlockSync) SubscribeChainHead(ch chan<- types.ChainHeadEvent) event.Subscription {
return s.chainHeadFeed.Subscribe(ch)
}
// Process implements request.Module.
func (s *beaconBlockSync) Process(requester request.Requester, events []request.Event) {
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, req, resp := event.RequestInfo()
blockRoot := common.Hash(req.(sync.ReqBeaconBlock))
log.Debug("Beacon block event", "type", event.Type.Name, "hash", blockRoot)
if resp != nil {
s.recentBlocks.Add(blockRoot, resp.(*types.BeaconBlock))
}
if s.locked[blockRoot] == sid {
delete(s.locked, blockRoot)
}
case sync.EvNewHead:
s.serverHeads[event.Server] = event.Data.(types.HeadInfo).BlockRoot
case request.EvUnregistered:
delete(s.serverHeads, event.Server)
}
}
s.updateEventFeed()
// request validated head block if unavailable and not yet requested
if vh, ok := s.headTracker.ValidatedOptimistic(); ok {
s.tryRequestBlock(requester, vh.Attested.Hash(), false)
}
// request prefetch head if the given server has announced it
if prefetchHead := s.headTracker.PrefetchHead().BlockRoot; prefetchHead != (common.Hash{}) {
s.tryRequestBlock(requester, prefetchHead, true)
}
}
func (s *beaconBlockSync) tryRequestBlock(requester request.Requester, blockRoot common.Hash, needSameHead bool) {
if _, ok := s.recentBlocks.Get(blockRoot); ok {
return
}
if _, ok := s.locked[blockRoot]; ok {
return
}
for _, server := range requester.CanSendTo() {
if needSameHead && (s.serverHeads[server] != blockRoot) {
continue
}
id := requester.Send(server, sync.ReqBeaconBlock(blockRoot))
s.locked[blockRoot] = request.ServerAndID{Server: server, ID: id}
return
}
}
func blockHeadInfo(block *types.BeaconBlock) types.HeadInfo {
if block == nil {
return types.HeadInfo{}
}
return types.HeadInfo{Slot: block.Slot(), BlockRoot: block.Root()}
}
func (s *beaconBlockSync) updateEventFeed() {
optimistic, ok := s.headTracker.ValidatedOptimistic()
if !ok {
return
}
validatedHead := optimistic.Attested.Hash()
headBlock, ok := s.recentBlocks.Get(validatedHead)
if !ok {
return
}
var finalizedHash common.Hash
if finality, ok := s.headTracker.ValidatedFinality(); ok {
he := optimistic.Attested.Epoch()
fe := finality.Attested.Header.Epoch()
switch {
case he == fe:
finalizedHash = finality.Finalized.PayloadHeader.BlockHash()
case he < fe:
return
case he == fe+1:
parent, ok := s.recentBlocks.Get(optimistic.Attested.ParentRoot)
if !ok || parent.Slot()/params.EpochLength == fe {
return // head is at first slot of next epoch, wait for finality update
}
}
}
headInfo := blockHeadInfo(headBlock)
if headInfo == s.lastHeadInfo {
return
}
s.lastHeadInfo = headInfo
// new head block and finality info available; extract executable data and send event to feed
execBlock, err := headBlock.ExecutionPayload()
if err != nil {
log.Error("Error extracting execution block from validated beacon block", "error", err)
return
}
s.chainHeadFeed.Send(types.ChainHeadEvent{
BeaconHead: optimistic.Attested.Header,
Block: execBlock,
Finalized: finalizedHash,
})
}
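// Example (illustrative sketch, not part of this change): an in-package consumer
// of the feed populated by updateEventFeed; Client.Start wires an equivalent
// channel into the engine client.
func logChainHeads(bs *beaconBlockSync) {
    headCh := make(chan types.ChainHeadEvent, 16)
    sub := bs.SubscribeChainHead(headCh)
    defer sub.Unsubscribe()
    for event := range headCh {
        // Each event carries the validated beacon header, the derived execution
        // block and the latest known finalized execution block hash.
        log.Info("New validated head", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "finalized", event.Finalized)
    }
}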

@ -0,0 +1,160 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
)
var (
testServer1 = testServer("testServer1")
testServer2 = testServer("testServer2")
testBlock1 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 123,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 456,
BlockHash: zrntcommon.Hash32(common.HexToHash("905ac721c4058d9ed40b27b6b9c1bdd10d4333e4f3d9769100bf9dfb80e5d1f6")),
},
},
})
testBlock2 = types.NewBeaconBlock(&deneb.BeaconBlock{
Slot: 124,
Body: deneb.BeaconBlockBody{
ExecutionPayload: deneb.ExecutionPayload{
BlockNumber: 457,
BlockHash: zrntcommon.Hash32(common.HexToHash("011703f39c664efc1c6cf5f49ca09b595581eec572d4dfddd3d6179a9e63e655")),
},
},
})
)
type testServer string
func (t testServer) Name() string {
return string(t)
}
func TestBlockSync(t *testing.T) {
ht := &testHeadTracker{}
blockSync := newBeaconBlockSync(ht)
headCh := make(chan types.ChainHeadEvent, 16)
blockSync.SubscribeChainHead(headCh)
ts := sync.NewTestScheduler(t, blockSync)
ts.AddServer(testServer1, 1)
ts.AddServer(testServer2, 1)
expHeadBlock := func(expHead *types.BeaconBlock) {
t.Helper()
var expNumber, headNumber uint64
if expHead != nil {
p, _ := expHead.ExecutionPayload()
expNumber = p.NumberU64()
}
select {
case event := <-headCh:
headNumber = event.Block.NumberU64()
default:
}
if headNumber != expNumber {
t.Errorf("Wrong head block, expected block number %d, got %d)", expNumber, headNumber)
}
}
// no block requests expected until head tracker knows about a head
ts.Run(1)
expHeadBlock(nil)
// set block 1 as prefetch head, announced by server 2
head1 := blockHeadInfo(testBlock1)
ht.prefetch = head1
ts.ServerEvent(sync.EvNewHead, testServer2, head1)
// expect request to server 2 which has announced the head
ts.Run(2, testServer2, sync.ReqBeaconBlock(head1.BlockRoot))
// valid response
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testBlock1)
ts.AddAllowance(testServer2, 1)
ts.Run(3)
// head block still not expected as the fetched block is not the validated head yet
expHeadBlock(nil)
// set as validated head, expect no further requests but block 1 set as head block
ht.validated.Header = testBlock1.Header()
ts.Run(4)
expHeadBlock(testBlock1)
// set block 2 as prefetch head, announced by server 1
head2 := blockHeadInfo(testBlock2)
ht.prefetch = head2
ts.ServerEvent(sync.EvNewHead, testServer1, head2)
// expect request to server 1
ts.Run(5, testServer1, sync.ReqBeaconBlock(head2.BlockRoot))
// req2 fails, no further requests expected because server 2 has not announced it
ts.RequestEvent(request.EvFail, ts.Request(5, 1), nil)
ts.Run(6)
// set as validated head before retrieving block; now it's assumed to be available from server 2 too
ht.validated.Header = testBlock2.Header()
// expect req2 retry to server 2
ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot))
// now head block should be unavailable again
expHeadBlock(nil)
// valid response, now head block should be block 2 immediately as it is already validated
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2)
ts.Run(8)
expHeadBlock(testBlock2)
}
type testHeadTracker struct {
prefetch types.HeadInfo
validated types.SignedHeader
}
func (h *testHeadTracker) PrefetchHead() types.HeadInfo {
return h.prefetch
}
func (h *testHeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) {
return types.OptimisticUpdate{
Attested: types.HeaderWithExecProof{Header: h.validated.Header},
Signature: h.validated.Signature,
SignatureSlot: h.validated.SignatureSlot,
}, h.validated.Header != (types.Header{})
}
// TODO add test case for finality
func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
finalized := types.NewExecutionHeader(new(deneb.ExecutionPayloadHeader))
return types.FinalityUpdate{
Attested: types.HeaderWithExecProof{Header: h.validated.Header},
Finalized: types.HeaderWithExecProof{PayloadHeader: finalized},
Signature: h.validated.Signature,
SignatureSlot: h.validated.SignatureSlot,
}, h.validated.Header != (types.Header{})
}

@ -0,0 +1,115 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"strings"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/api"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2"
)
type Client struct {
urls []string
customHeader map[string]string
chainConfig *lightClientConfig
scheduler *request.Scheduler
blockSync *beaconBlockSync
engineRPC *rpc.Client
chainHeadSub event.Subscription
engineClient *engineClient
}
func NewClient(ctx *cli.Context) *Client {
if !ctx.IsSet(utils.BeaconApiFlag.Name) {
utils.Fatalf("Beacon node light client API URL not specified")
}
var (
chainConfig = makeChainConfig(ctx)
customHeader = make(map[string]string)
)
for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
kv := strings.Split(s, ":")
if len(kv) != 2 {
utils.Fatalf("Invalid custom API header entry: %s", s)
}
customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
}
// create data structures
var (
db = memorydb.New()
threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
headTracker = light.NewHeadTracker(committeeChain, threshold)
)
headSync := sync.NewHeadSync(headTracker, committeeChain)
// set up scheduler and sync modules
scheduler := request.NewScheduler()
checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
forwardSync := sync.NewForwardUpdateSync(committeeChain)
beaconBlockSync := newBeaconBlockSync(headTracker)
scheduler.RegisterTarget(headTracker)
scheduler.RegisterTarget(committeeChain)
scheduler.RegisterModule(checkpointInit, "checkpointInit")
scheduler.RegisterModule(forwardSync, "forwardSync")
scheduler.RegisterModule(headSync, "headSync")
scheduler.RegisterModule(beaconBlockSync, "beaconBlockSync")
return &Client{
scheduler: scheduler,
urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
customHeader: customHeader,
chainConfig: &chainConfig,
blockSync: beaconBlockSync,
}
}
func (c *Client) SetEngineRPC(engine *rpc.Client) {
c.engineRPC = engine
}
func (c *Client) Start() error {
headCh := make(chan types.ChainHeadEvent, 16)
c.chainHeadSub = c.blockSync.SubscribeChainHead(headCh)
c.engineClient = startEngineClient(c.chainConfig, c.engineRPC, headCh)
c.scheduler.Start()
for _, url := range c.urls {
beaconApi := api.NewBeaconLightApi(url, c.customHeader)
c.scheduler.RegisterServer(request.NewServer(api.NewApiServer(beaconApi), &mclock.System{}))
}
return nil
}
func (c *Client) Stop() error {
c.engineClient.stop()
c.chainHeadSub.Unsubscribe()
c.scheduler.Stop()
return nil
}
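// Example (illustrative sketch, not part of this change): one way a command could
// drive the client, assuming an engine API *rpc.Client has already been dialed by
// the caller.
func runBlsync(cliCtx *cli.Context, engineRPC *rpc.Client) error {
    client := NewClient(cliCtx)
    client.SetEngineRPC(engineRPC)
    if err := client.Start(); err != nil {
        return err
    }
    defer client.Stop()
    <-cliCtx.Context.Done() // run until the command context is cancelled
    return nil
}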

@ -0,0 +1,129 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/urfave/cli/v2"
)
// lightClientConfig contains beacon light client configuration
type lightClientConfig struct {
*types.ChainConfig
Checkpoint common.Hash
}
var (
MainnetConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
GenesisTime: 1606824023,
}).
AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}).
AddFork("DENEB", 269568, []byte{4, 0, 0, 0}),
Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
}
SepoliaConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
GenesisTime: 1655733600,
}).
AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}).
AddFork("DENEB", 132608, []byte{144, 0, 0, 115}),
Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
}
GoerliConfig = lightClientConfig{
ChainConfig: (&types.ChainConfig{
GenesisValidatorsRoot: common.HexToHash("0x043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"),
GenesisTime: 1614588812,
}).
AddFork("GENESIS", 0, []byte{0, 0, 16, 32}).
AddFork("ALTAIR", 36660, []byte{1, 0, 16, 32}).
AddFork("BELLATRIX", 112260, []byte{2, 0, 16, 32}).
AddFork("CAPELLA", 162304, []byte{3, 0, 16, 32}).
AddFork("DENEB", 231680, []byte{4, 0, 16, 32}),
Checkpoint: common.HexToHash("0x53a0f4f0a378e2c4ae0a9ee97407eb69d0d737d8d8cd0a5fb1093f42f7b81c49"),
}
)
func makeChainConfig(ctx *cli.Context) lightClientConfig {
var config lightClientConfig
customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name)
utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag, utils.BeaconConfigFlag)
switch {
case ctx.Bool(utils.MainnetFlag.Name):
config = MainnetConfig
case ctx.Bool(utils.SepoliaFlag.Name):
config = SepoliaConfig
case ctx.Bool(utils.GoerliFlag.Name):
config = GoerliConfig
default:
if !customConfig {
config = MainnetConfig
}
}
// Genesis root and time should always be specified together with custom chain config
if customConfig {
if !ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis root is missing")
}
if !ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but genesis time is missing")
}
if !ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
utils.Fatalf("Custom beacon chain config is specified but checkpoint is missing")
}
config.ChainConfig = &types.ChainConfig{
GenesisTime: ctx.Uint64(utils.BeaconGenesisTimeFlag.Name),
}
if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
copy(config.GenesisValidatorsRoot[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
}
if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
}
} else {
if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
utils.Fatalf("Genesis root is specified but custom beacon chain config is missing")
}
if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
utils.Fatalf("Genesis time is specified but custom beacon chain config is missing")
}
}
// Checkpoint is required with custom chain config and is optional with pre-defined config
if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
copy(config.Checkpoint[:len(c)], c)
} else {
utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
}
}
return config
}
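// Example (illustrative sketch, not part of this change): building a config
// programmatically with the chained AddFork helper; the root, genesis time,
// epochs and version bytes below are placeholders, not a real network.
var exampleConfig = lightClientConfig{
    ChainConfig: (&types.ChainConfig{
        GenesisValidatorsRoot: common.Hash{}, // placeholder root
        GenesisTime:           1700000000,    // placeholder genesis time
    }).
        AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
        AddFork("CAPELLA", 100, []byte{3, 0, 0, 0}),
    Checkpoint: common.Hash{}, // placeholder checkpoint
}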

@ -0,0 +1,150 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package blsync
import (
"context"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
)
type engineClient struct {
config *lightClientConfig
rpc *rpc.Client
rootCtx context.Context
cancelRoot context.CancelFunc
wg sync.WaitGroup
}
func startEngineClient(config *lightClientConfig, rpc *rpc.Client, headCh <-chan types.ChainHeadEvent) *engineClient {
ctx, cancel := context.WithCancel(context.Background())
ec := &engineClient{
config: config,
rpc: rpc,
rootCtx: ctx,
cancelRoot: cancel,
}
ec.wg.Add(1)
go ec.updateLoop(headCh)
return ec
}
func (ec *engineClient) stop() {
ec.cancelRoot()
ec.wg.Wait()
}
func (ec *engineClient) updateLoop(headCh <-chan types.ChainHeadEvent) {
defer ec.wg.Done()
for {
select {
case <-ec.rootCtx.Done():
log.Debug("Stopping engine API update loop")
return
case event := <-headCh:
if ec.rpc == nil { // dry run, no engine API specified
log.Info("New execution block retrieved", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "finalized", event.Finalized)
continue
}
fork := ec.config.ForkAtEpoch(event.BeaconHead.Epoch())
forkName := strings.ToLower(fork.Name)
log.Debug("Calling NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash())
if status, err := ec.callNewPayload(forkName, event); err == nil {
log.Info("Successful NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "status", status)
} else {
log.Error("Failed NewPayload", "number", event.Block.NumberU64(), "hash", event.Block.Hash(), "error", err)
}
log.Debug("Calling ForkchoiceUpdated", "head", event.Block.Hash())
if status, err := ec.callForkchoiceUpdated(forkName, event); err == nil {
log.Info("Successful ForkchoiceUpdated", "head", event.Block.Hash(), "status", status)
} else {
log.Error("Failed ForkchoiceUpdated", "head", event.Block.Hash(), "error", err)
}
}
}
}
func (ec *engineClient) callNewPayload(fork string, event types.ChainHeadEvent) (string, error) {
execData := engine.BlockToExecutableData(event.Block, nil, nil).ExecutionPayload
var (
method string
params = []any{execData}
)
switch fork {
case "deneb":
method = "engine_newPayloadV3"
parentBeaconRoot := event.BeaconHead.ParentRoot
blobHashes := collectBlobHashes(event.Block)
params = append(params, blobHashes, parentBeaconRoot)
case "capella":
method = "engine_newPayloadV2"
default:
method = "engine_newPayloadV1"
}
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
defer cancel()
var resp engine.PayloadStatusV1
err := ec.rpc.CallContext(ctx, &resp, method, params...)
return resp.Status, err
}
func collectBlobHashes(b *ctypes.Block) []common.Hash {
list := make([]common.Hash, 0)
for _, tx := range b.Transactions() {
list = append(list, tx.BlobHashes()...)
}
return list
}
func (ec *engineClient) callForkchoiceUpdated(fork string, event types.ChainHeadEvent) (string, error) {
update := engine.ForkchoiceStateV1{
HeadBlockHash: event.Block.Hash(),
SafeBlockHash: event.Finalized,
FinalizedBlockHash: event.Finalized,
}
var method string
switch fork {
case "deneb":
method = "engine_forkchoiceUpdatedV3"
case "capella":
method = "engine_forkchoiceUpdatedV2"
default:
method = "engine_forkchoiceUpdatedV1"
}
ctx, cancel := context.WithTimeout(ec.rootCtx, time.Second*5)
defer cancel()
var resp engine.ForkChoiceResponse
err := ec.rpc.CallContext(ctx, &resp, method, update, nil)
return resp.PayloadStatus.Status, err
}

@ -19,6 +19,7 @@ package engine
import (
"fmt"
"math/big"
"slices"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
@ -26,6 +27,16 @@ import (
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
) )
// PayloadVersion denotes the version of PayloadAttributes used to request the
// building of the payload to commence.
type PayloadVersion byte
var (
PayloadV1 PayloadVersion = 0x1
PayloadV2 PayloadVersion = 0x2
PayloadV3 PayloadVersion = 0x3
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributes -field-override payloadAttributesMarshaling -out gen_blockparams.go
// PayloadAttributes describes the environment context in which a block should
@ -115,6 +126,16 @@ type TransitionConfigurationV1 struct {
// PayloadID is an identifier of the payload build process
type PayloadID [8]byte
// Version returns the payload version associated with the identifier.
func (b PayloadID) Version() PayloadVersion {
return PayloadVersion(b[0])
}
// Is returns whether the identifier matches any of provided payload versions.
func (b PayloadID) Is(versions ...PayloadVersion) bool {
return slices.Contains(versions, b.Version())
}
func (b PayloadID) String() string {
return hexutil.Encode(b[:])
}
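// Example (illustrative sketch, not part of this change): the first byte of a
// payload ID records the PayloadAttributes version it was built from, so callers
// can gate version-specific handling on it.
func needsBlobValidation(id PayloadID) bool {
    return id.Is(PayloadV3) // V3 attributes correspond to Cancun-style payloads carrying blobs
}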
@ -188,7 +209,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash,
if params.BaseFeePerGas != nil && (params.BaseFeePerGas.Sign() == -1 || params.BaseFeePerGas.BitLen() > 256) {
return nil, fmt.Errorf("invalid baseFeePerGas: %v", params.BaseFeePerGas)
}
var blobHashes = make([]common.Hash, 0, len(txs))
for _, tx := range txs {
blobHashes = append(blobHashes, tx.BlobHashes()...)
}
@ -229,7 +250,7 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash,
BlobGasUsed: params.BlobGasUsed,
ParentBeaconRoot: beaconRoot,
}
block := types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: txs, Uncles: nil, Withdrawals: params.Withdrawals})
if block.Hash() != params.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
}
@ -278,3 +299,21 @@ type ExecutionPayloadBodyV1 struct {
TransactionData []hexutil.Bytes `json:"transactions"`
Withdrawals []*types.Withdrawal `json:"withdrawals"`
}
// Client identifiers to support ClientVersionV1.
const (
ClientCode = "GE"
ClientName = "go-ethereum"
)
// ClientVersionV1 contains information which identifies a client implementation.
type ClientVersionV1 struct {
Code string `json:"code"`
Name string `json:"name"`
Version string `json:"version"`
Commit string `json:"commit"`
}
func (v *ClientVersionV1) String() string {
return fmt.Sprintf("%s-%s-%s-%s", v.Code, v.Name, v.Version, v.Commit)
}

@ -0,0 +1,114 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"reflect"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/light/sync"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// ApiServer is a wrapper around BeaconLightApi that implements request.requestServer.
type ApiServer struct {
api *BeaconLightApi
eventCallback func(event request.Event)
unsubscribe func()
}
// NewApiServer creates a new ApiServer.
func NewApiServer(api *BeaconLightApi) *ApiServer {
return &ApiServer{api: api}
}
// Subscribe implements request.requestServer.
func (s *ApiServer) Subscribe(eventCallback func(event request.Event)) {
s.eventCallback = eventCallback
listener := HeadEventListener{
OnNewHead: func(slot uint64, blockRoot common.Hash) {
log.Debug("New head received", "slot", slot, "blockRoot", blockRoot)
eventCallback(request.Event{Type: sync.EvNewHead, Data: types.HeadInfo{Slot: slot, BlockRoot: blockRoot}})
},
OnOptimistic: func(update types.OptimisticUpdate) {
log.Debug("New optimistic update received", "slot", update.Attested.Slot, "blockRoot", update.Attested.Hash(), "signerCount", update.Signature.SignerCount())
eventCallback(request.Event{Type: sync.EvNewOptimisticUpdate, Data: update})
},
OnFinality: func(update types.FinalityUpdate) {
log.Debug("New finality update received", "slot", update.Attested.Slot, "blockRoot", update.Attested.Hash(), "signerCount", update.Signature.SignerCount())
eventCallback(request.Event{Type: sync.EvNewFinalityUpdate, Data: update})
},
OnError: func(err error) {
log.Warn("Head event stream error", "err", err)
},
}
s.unsubscribe = s.api.StartHeadListener(listener)
}
// SendRequest implements request.requestServer.
func (s *ApiServer) SendRequest(id request.ID, req request.Request) {
go func() {
var resp request.Response
var err error
switch data := req.(type) {
case sync.ReqUpdates:
log.Debug("Beacon API: requesting light client update", "reqid", id, "period", data.FirstPeriod, "count", data.Count)
var r sync.RespUpdates
r.Updates, r.Committees, err = s.api.GetBestUpdatesAndCommittees(data.FirstPeriod, data.Count)
resp = r
case sync.ReqHeader:
var r sync.RespHeader
log.Debug("Beacon API: requesting header", "reqid", id, "hash", common.Hash(data))
r.Header, r.Canonical, r.Finalized, err = s.api.GetHeader(common.Hash(data))
resp = r
case sync.ReqCheckpointData:
log.Debug("Beacon API: requesting checkpoint data", "reqid", id, "hash", common.Hash(data))
resp, err = s.api.GetCheckpointData(common.Hash(data))
case sync.ReqBeaconBlock:
log.Debug("Beacon API: requesting block", "reqid", id, "hash", common.Hash(data))
resp, err = s.api.GetBeaconBlock(common.Hash(data))
case sync.ReqFinality:
log.Debug("Beacon API: requesting finality update")
resp, err = s.api.GetFinalityUpdate()
default:
}
if err != nil {
log.Warn("Beacon API request failed", "type", reflect.TypeOf(req), "reqid", id, "err", err)
s.eventCallback(request.Event{Type: request.EvFail, Data: request.RequestResponse{ID: id, Request: req}})
} else {
log.Debug("Beacon API request answered", "type", reflect.TypeOf(req), "reqid", id)
s.eventCallback(request.Event{Type: request.EvResponse, Data: request.RequestResponse{ID: id, Request: req, Response: resp}})
}
}()
}
// Unsubscribe implements request.requestServer.
// Note: Unsubscribe should not be called concurrently with Subscribe.
func (s *ApiServer) Unsubscribe() {
if s.unsubscribe != nil {
s.unsubscribe()
s.unsubscribe = nil
}
}
// Name implements request.Server
func (s *ApiServer) Name() string {
return s.api.url
}

@ -0,0 +1,578 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/donovanhide/eventsource"
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
)
var (
ErrNotFound = errors.New("404 Not Found")
ErrInternal = errors.New("500 Internal Server Error")
)
type CommitteeUpdate struct {
Version string
Update types.LightClientUpdate
NextSyncCommittee types.SerializedSyncCommittee
}
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate
type committeeUpdateJson struct {
Version string `json:"version"`
Data committeeUpdateData `json:"data"`
}
type committeeUpdateData struct {
Header jsonBeaconHeader `json:"attested_header"`
NextSyncCommittee types.SerializedSyncCommittee `json:"next_sync_committee"`
NextSyncCommitteeBranch merkle.Values `json:"next_sync_committee_branch"`
FinalizedHeader *jsonBeaconHeader `json:"finalized_header,omitempty"`
FinalityBranch merkle.Values `json:"finality_branch,omitempty"`
SyncAggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
}
type jsonBeaconHeader struct {
Beacon types.Header `json:"beacon"`
}
type jsonHeaderWithExecProof struct {
Beacon types.Header `json:"beacon"`
Execution json.RawMessage `json:"execution"`
ExecutionBranch merkle.Values `json:"execution_branch"`
}
// UnmarshalJSON unmarshals from JSON.
func (u *CommitteeUpdate) UnmarshalJSON(input []byte) error {
var dec committeeUpdateJson
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
u.Version = dec.Version
u.NextSyncCommittee = dec.Data.NextSyncCommittee
u.Update = types.LightClientUpdate{
AttestedHeader: types.SignedHeader{
Header: dec.Data.Header.Beacon,
Signature: dec.Data.SyncAggregate,
SignatureSlot: uint64(dec.Data.SignatureSlot),
},
NextSyncCommitteeRoot: u.NextSyncCommittee.Root(),
NextSyncCommitteeBranch: dec.Data.NextSyncCommitteeBranch,
FinalityBranch: dec.Data.FinalityBranch,
}
if dec.Data.FinalizedHeader != nil {
u.Update.FinalizedHeader = &dec.Data.FinalizedHeader.Beacon
}
return nil
}
// fetcher is an interface useful for debug-harnessing the http api.
type fetcher interface {
Do(req *http.Request) (*http.Response, error)
}
// BeaconLightApi requests light client information from a beacon node REST API.
// Note: all required API endpoints are currently only implemented by Lodestar.
type BeaconLightApi struct {
url string
client fetcher
customHeaders map[string]string
}
func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLightApi {
return &BeaconLightApi{
url: url,
client: &http.Client{
Timeout: time.Second * 10,
},
customHeaders: customHeaders,
}
}
func (api *BeaconLightApi) httpGet(path string) ([]byte, error) {
req, err := http.NewRequest("GET", api.url+path, nil)
if err != nil {
return nil, err
}
for k, v := range api.customHeaders {
req.Header.Set(k, v)
}
resp, err := api.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case 200:
return io.ReadAll(resp.Body)
case 404:
return nil, ErrNotFound
case 500:
return nil, ErrInternal
default:
return nil, fmt.Errorf("unexpected error from API endpoint \"%s\": status code %d", path, resp.StatusCode)
}
}
func (api *BeaconLightApi) httpGetf(format string, params ...any) ([]byte, error) {
return api.httpGet(fmt.Sprintf(format, params...))
}
// GetBestUpdatesAndCommittees fetches and validates LightClientUpdate for given
// period and full serialized committee for the next period (committee root hash
// equals update.NextSyncCommitteeRoot).
// Note that the results are validated but the update signature should be verified
// by the caller as its validity depends on the update chain.
func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
resp, err := api.httpGetf("/eth/v1/beacon/light_client/updates?start_period=%d&count=%d", firstPeriod, count)
if err != nil {
return nil, nil, err
}
var data []CommitteeUpdate
if err := json.Unmarshal(resp, &data); err != nil {
return nil, nil, err
}
if len(data) != int(count) {
return nil, nil, errors.New("invalid number of committee updates")
}
updates := make([]*types.LightClientUpdate, int(count))
committees := make([]*types.SerializedSyncCommittee, int(count))
for i, d := range data {
if d.Update.AttestedHeader.Header.SyncPeriod() != firstPeriod+uint64(i) {
return nil, nil, errors.New("wrong committee update header period")
}
if err := d.Update.Validate(); err != nil {
return nil, nil, err
}
if d.NextSyncCommittee.Root() != d.Update.NextSyncCommitteeRoot {
return nil, nil, errors.New("wrong sync committee root")
}
updates[i], committees[i] = new(types.LightClientUpdate), new(types.SerializedSyncCommittee)
*updates[i], *committees[i] = d.Update, d.NextSyncCommittee
}
return updates, committees, nil
}
// GetOptimisticUpdate fetches the latest available optimistic update.
// Note that the signature should be verified by the caller as its validity
// depends on the update chain.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
func (api *BeaconLightApi) GetOptimisticUpdate() (types.OptimisticUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
if err != nil {
return types.OptimisticUpdate{}, err
}
return decodeOptimisticUpdate(resp)
}
func decodeOptimisticUpdate(enc []byte) (types.OptimisticUpdate, error) {
var data struct {
Version string
Data struct {
Attested jsonHeaderWithExecProof `json:"attested_header"`
Aggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
} `json:"data"`
}
if err := json.Unmarshal(enc, &data); err != nil {
return types.OptimisticUpdate{}, err
}
// Decode the execution payload headers.
attestedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Attested.Execution)
if err != nil {
return types.OptimisticUpdate{}, fmt.Errorf("invalid attested header: %v", err)
}
if data.Data.Attested.Beacon.StateRoot == (common.Hash{}) {
// workaround for different event encoding format in Lodestar
if err := json.Unmarshal(enc, &data.Data); err != nil {
return types.OptimisticUpdate{}, err
}
}
if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
return types.OptimisticUpdate{}, errors.New("invalid sync_committee_bits length")
}
if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
return types.OptimisticUpdate{}, errors.New("invalid sync_committee_signature length")
}
return types.OptimisticUpdate{
Attested: types.HeaderWithExecProof{
Header: data.Data.Attested.Beacon,
PayloadHeader: attestedExecHeader,
PayloadBranch: data.Data.Attested.ExecutionBranch,
},
Signature: data.Data.Aggregate,
SignatureSlot: uint64(data.Data.SignatureSlot),
}, nil
}
// GetFinalityUpdate fetches the latest available finality update.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update")
if err != nil {
return types.FinalityUpdate{}, err
}
return decodeFinalityUpdate(resp)
}
func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
var data struct {
Version string
Data struct {
Attested jsonHeaderWithExecProof `json:"attested_header"`
Finalized jsonHeaderWithExecProof `json:"finalized_header"`
FinalityBranch merkle.Values `json:"finality_branch"`
Aggregate types.SyncAggregate `json:"sync_aggregate"`
SignatureSlot common.Decimal `json:"signature_slot"`
}
}
if err := json.Unmarshal(enc, &data); err != nil {
return types.FinalityUpdate{}, err
}
// Decode the execution payload headers.
attestedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Attested.Execution)
if err != nil {
return types.FinalityUpdate{}, fmt.Errorf("invalid attested header: %v", err)
}
finalizedExecHeader, err := types.ExecutionHeaderFromJSON(data.Version, data.Data.Finalized.Execution)
if err != nil {
return types.FinalityUpdate{}, fmt.Errorf("invalid finalized header: %v", err)
}
// Perform sanity checks.
if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
return types.FinalityUpdate{}, errors.New("invalid sync_committee_bits length")
}
if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
return types.FinalityUpdate{}, errors.New("invalid sync_committee_signature length")
}
return types.FinalityUpdate{
Attested: types.HeaderWithExecProof{
Header: data.Data.Attested.Beacon,
PayloadHeader: attestedExecHeader,
PayloadBranch: data.Data.Attested.ExecutionBranch,
},
Finalized: types.HeaderWithExecProof{
Header: data.Data.Finalized.Beacon,
PayloadHeader: finalizedExecHeader,
PayloadBranch: data.Data.Finalized.ExecutionBranch,
},
FinalityBranch: data.Data.FinalityBranch,
Signature: data.Data.Aggregate,
SignatureSlot: uint64(data.Data.SignatureSlot),
}, nil
}
// GetHeader fetches and validates the beacon header with the given blockRoot.
// If blockRoot is null hash then the latest head header is fetched.
// The values of the canonical and finalized flags are also returned. Note that
// these flags are not validated.
func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, bool, bool, error) {
var blockId string
if blockRoot == (common.Hash{}) {
blockId = "head"
} else {
blockId = blockRoot.Hex()
}
resp, err := api.httpGetf("/eth/v1/beacon/headers/%s", blockId)
if err != nil {
return types.Header{}, false, false, err
}
var data struct {
Finalized bool `json:"finalized"`
Data struct {
Root common.Hash `json:"root"`
Canonical bool `json:"canonical"`
Header struct {
Message types.Header `json:"message"`
Signature hexutil.Bytes `json:"signature"`
} `json:"header"`
} `json:"data"`
}
if err := json.Unmarshal(resp, &data); err != nil {
return types.Header{}, false, false, err
}
header := data.Data.Header.Message
if blockRoot == (common.Hash{}) {
blockRoot = data.Data.Root
}
if header.Hash() != blockRoot {
return types.Header{}, false, false, errors.New("retrieved beacon header root does not match")
}
return header, data.Data.Canonical, data.Finalized, nil
}
// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
resp, err := api.httpGetf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:])
if err != nil {
return nil, err
}
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
type bootstrapData struct {
Data struct {
Header jsonBeaconHeader `json:"header"`
Committee *types.SerializedSyncCommittee `json:"current_sync_committee"`
CommitteeBranch merkle.Values `json:"current_sync_committee_branch"`
} `json:"data"`
}
var data bootstrapData
if err := json.Unmarshal(resp, &data); err != nil {
return nil, err
}
if data.Data.Committee == nil {
return nil, errors.New("sync committee is missing")
}
header := data.Data.Header.Beacon
if header.Hash() != checkpointHash {
return nil, fmt.Errorf("invalid checkpoint block header, have %v want %v", header.Hash(), checkpointHash)
}
checkpoint := &types.BootstrapData{
Header: header,
CommitteeBranch: data.Data.CommitteeBranch,
CommitteeRoot: data.Data.Committee.Root(),
Committee: data.Data.Committee,
}
if err := checkpoint.Validate(); err != nil {
return nil, fmt.Errorf("invalid checkpoint: %w", err)
}
if checkpoint.Header.Hash() != checkpointHash {
return nil, errors.New("wrong checkpoint hash")
}
return checkpoint, nil
}
func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*types.BeaconBlock, error) {
resp, err := api.httpGetf("/eth/v2/beacon/blocks/0x%x", blockRoot)
if err != nil {
return nil, err
}
var beaconBlockMessage struct {
Version string
Data struct {
Message json.RawMessage `json:"message"`
}
}
if err := json.Unmarshal(resp, &beaconBlockMessage); err != nil {
return nil, fmt.Errorf("invalid block json data: %v", err)
}
block, err := types.BlockFromJSON(beaconBlockMessage.Version, beaconBlockMessage.Data.Message)
if err != nil {
return nil, err
}
computedRoot := block.Root()
if computedRoot != blockRoot {
return nil, fmt.Errorf("Beacon block root hash mismatch (expected: %x, got: %x)", blockRoot, computedRoot)
}
return block, nil
}
func decodeHeadEvent(enc []byte) (uint64, common.Hash, error) {
var data struct {
Slot common.Decimal `json:"slot"`
Block common.Hash `json:"block"`
}
if err := json.Unmarshal(enc, &data); err != nil {
return 0, common.Hash{}, err
}
return uint64(data.Slot), data.Block, nil
}
type HeadEventListener struct {
OnNewHead func(slot uint64, blockRoot common.Hash)
OnOptimistic func(head types.OptimisticUpdate)
OnFinality func(head types.FinalityUpdate)
OnError func(err error)
}
// StartHeadListener creates an event subscription for heads and signed (optimistic)
// head updates and calls the specified callback functions when they are received.
// The callbacks are also called for the current head and optimistic head at startup.
// They are never called concurrently.
func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func() {
var (
ctx, closeCtx = context.WithCancel(context.Background())
streamCh = make(chan *eventsource.Stream, 1)
wg sync.WaitGroup
)
// When connected to a Lodestar node the subscription blocks until the first actual
// event arrives; therefore we create the subscription in a separate goroutine while
// letting the main goroutine sync up to the current head.
wg.Add(1)
go func() {
defer wg.Done()
stream := api.startEventStream(ctx, &listener)
if stream == nil {
// This case happens when the context was closed.
return
}
// Stream was opened, wait for close signal.
streamCh <- stream
<-ctx.Done()
stream.Close()
}()
wg.Add(1)
go func() {
defer wg.Done()
// Request initial data.
log.Trace("Requesting initial head header")
if head, _, _, err := api.GetHeader(common.Hash{}); err == nil {
log.Trace("Retrieved initial head header", "slot", head.Slot, "hash", head.Hash())
listener.OnNewHead(head.Slot, head.Hash())
} else {
log.Debug("Failed to retrieve initial head header", "error", err)
}
log.Trace("Requesting initial optimistic update")
if optimisticUpdate, err := api.GetOptimisticUpdate(); err == nil {
log.Trace("Retrieved initial optimistic update", "slot", optimisticUpdate.Attested.Slot, "hash", optimisticUpdate.Attested.Hash())
listener.OnOptimistic(optimisticUpdate)
} else {
log.Debug("Failed to retrieve initial optimistic update", "error", err)
}
log.Trace("Requesting initial finality update")
if finalityUpdate, err := api.GetFinalityUpdate(); err == nil {
log.Trace("Retrieved initial finality update", "slot", finalityUpdate.Finalized.Slot, "hash", finalityUpdate.Finalized.Hash())
listener.OnFinality(finalityUpdate)
} else {
log.Debug("Failed to retrieve initial finality update", "error", err)
}
log.Trace("Starting event stream processing loop")
// Receive the stream.
var stream *eventsource.Stream
select {
case stream = <-streamCh:
case <-ctx.Done():
log.Trace("Stopping event stream processing loop")
return
}
for {
select {
case event, ok := <-stream.Events:
if !ok {
log.Trace("Event stream closed")
return
}
log.Trace("New event received from event stream", "type", event.Event())
switch event.Event() {
case "head":
slot, blockRoot, err := decodeHeadEvent([]byte(event.Data()))
if err == nil {
listener.OnNewHead(slot, blockRoot)
} else {
listener.OnError(fmt.Errorf("error decoding head event: %v", err))
}
case "light_client_optimistic_update":
optimisticUpdate, err := decodeOptimisticUpdate([]byte(event.Data()))
if err == nil {
listener.OnOptimistic(optimisticUpdate)
} else {
listener.OnError(fmt.Errorf("error decoding optimistic update event: %v", err))
}
case "light_client_finality_update":
finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data()))
if err == nil {
listener.OnFinality(finalityUpdate)
} else {
listener.OnError(fmt.Errorf("error decoding finality update event: %v", err))
}
default:
listener.OnError(fmt.Errorf("unexpected event: %s", event.Event()))
}
case err, ok := <-stream.Errors:
if !ok {
return
}
listener.OnError(err)
}
}
}()
return func() {
closeCtx()
wg.Wait()
}
}
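// Illustrative usage sketch (not part of the original file): callers typically
// register the three update callbacks plus an error handler and keep the
// returned function to stop the subscription later.
//
//	stop := api.StartHeadListener(HeadEventListener{
//		OnNewHead:    func(slot uint64, blockRoot common.Hash) { log.Info("New head", "slot", slot, "root", blockRoot) },
//		OnOptimistic: func(update types.OptimisticUpdate) { log.Info("Optimistic update", "slot", update.Attested.Slot) },
//		OnFinality:   func(update types.FinalityUpdate) { log.Info("Finality update", "slot", update.Finalized.Slot) },
//		OnError:      func(err error) { log.Warn("Head listener error", "error", err) },
//	})
//	defer stop()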
// startEventStream establishes an event stream. This will keep retrying until the stream has been
// established. It can only return nil when the context is canceled.
func (api *BeaconLightApi) startEventStream(ctx context.Context, listener *HeadEventListener) *eventsource.Stream {
for retry := true; retry; retry = ctxSleep(ctx, 5*time.Second) {
path := "/eth/v1/events?topics=head&topics=light_client_finality_update&topics=light_client_optimistic_update"
log.Trace("Sending event subscription request")
req, err := http.NewRequestWithContext(ctx, "GET", api.url+path, nil)
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
continue
}
for k, v := range api.customHeaders {
req.Header.Set(k, v)
}
stream, err := eventsource.SubscribeWithRequest("", req)
if err != nil {
listener.OnError(fmt.Errorf("error creating event subscription: %v", err))
continue
}
log.Trace("Successfully created event stream")
return stream
}
return nil
}
func ctxSleep(ctx context.Context, timeout time.Duration) (ok bool) {
timer := time.NewTimer(timeout)
defer timer.Stop()
select {
case <-timer.C:
return true
case <-ctx.Done():
return false
}
}

@ -70,6 +70,7 @@ type CommitteeChain struct {
committees *canonicalStore[*types.SerializedSyncCommittee]
fixedCommitteeRoots *canonicalStore[common.Hash]
committeeCache *lru.Cache[uint64, syncCommittee] // cache deserialized committees
changeCounter uint64
clock mclock.Clock // monotonic clock (simulated clock in tests)
unixNano func() int64 // system clock (simulated clock in tests)
@ -86,6 +87,11 @@ func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signer
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}
// NewTestCommitteeChain creates a new CommitteeChain for testing.
func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
}
// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
@ -181,20 +187,20 @@ func (s *CommitteeChain) Reset() {
if err := s.rollback(0); err != nil {
log.Error("Error writing batch into chain database", "error", err)
}
s.changeCounter++
}
// CheckpointInit initializes a CommitteeChain based on a checkpoint.
// Note: if the chain is already initialized and the committees proven by the
// checkpoint do match the existing chain then the chain is retained and the
// new checkpoint becomes fixed.
func (s *CommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
s.chainmu.Lock()
defer s.chainmu.Unlock()
if err := bootstrap.Validate(); err != nil {
return err
}
period := bootstrap.Header.SyncPeriod()
if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil {
s.Reset()
@ -215,6 +221,7 @@ func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
s.Reset()
return err
}
s.changeCounter++
return nil
}
@ -367,6 +374,7 @@ func (s *CommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommi
return ErrWrongCommitteeRoot
}
}
s.changeCounter++
if reorg {
if err := s.rollback(period + 1); err != nil {
return err
@ -405,6 +413,13 @@ func (s *CommitteeChain) NextSyncPeriod() (uint64, bool) {
return s.committees.periods.End - 1, true
}
func (s *CommitteeChain) ChangeCounter() uint64 {
s.chainmu.RLock()
defer s.chainmu.RUnlock()
return s.changeCounter
}
// rollback removes all committees and fixed roots from the given period and updates
// starting from the previous period.
func (s *CommitteeChain) rollback(period uint64) error {
@ -452,12 +467,12 @@ func (s *CommitteeChain) getSyncCommittee(period uint64) (syncCommittee, error)
if sc, ok := s.committees.get(s.db, period); ok {
c, err := s.sigVerifier.deserializeSyncCommittee(sc)
if err != nil {
return nil, fmt.Errorf("sync committee #%d deserialization error: %v", period, err)
}
s.committeeCache.Add(period, c)
return c, nil
}
return nil, fmt.Errorf("missing serialized sync committee #%d", period)
}
// VerifySignedHeader returns true if the given signed header has a valid signature

@ -241,12 +241,12 @@ func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThresho
signerThreshold: signerThreshold,
enforceTime: enforceTime,
}
c.chain = NewTestCommitteeChain(c.db, &config, signerThreshold, enforceTime, c.clock)
return c
}
func (c *committeeChainTest) reloadChain() {
c.chain = NewTestCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, c.clock)
}
func (c *committeeChainTest) setClockPeriod(period float64) {

@ -0,0 +1,161 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package light
import (
"errors"
"sync"
"time"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/log"
)
// HeadTracker keeps track of the latest validated head and the "prefetch" head
// which is the (not necessarily validated) head announced by the majority of
// servers.
type HeadTracker struct {
lock sync.RWMutex
committeeChain *CommitteeChain
minSignerCount int
optimisticUpdate types.OptimisticUpdate
hasOptimisticUpdate bool
finalityUpdate types.FinalityUpdate
hasFinalityUpdate bool
prefetchHead types.HeadInfo
changeCounter uint64
}
// NewHeadTracker creates a new HeadTracker.
func NewHeadTracker(committeeChain *CommitteeChain, minSignerCount int) *HeadTracker {
return &HeadTracker{
committeeChain: committeeChain,
minSignerCount: minSignerCount,
}
}
// ValidatedOptimistic returns the latest validated optimistic update.
func (h *HeadTracker) ValidatedOptimistic() (types.OptimisticUpdate, bool) {
h.lock.RLock()
defer h.lock.RUnlock()
return h.optimisticUpdate, h.hasOptimisticUpdate
}
// ValidatedFinality returns the latest validated finality update.
func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
h.lock.RLock()
defer h.lock.RUnlock()
return h.finalityUpdate, h.hasFinalityUpdate
}
// ValidateOptimistic validates the given optimistic update. If the update is
// successfully validated and it is better than the old validated update (higher
// slot or same slot and more signers) then ValidatedOptimistic is updated.
// The boolean return flag signals if ValidatedOptimistic has been changed.
func (h *HeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
h.lock.Lock()
defer h.lock.Unlock()
if err := update.Validate(); err != nil {
return false, err
}
replace, err := h.validate(update.SignedHeader(), h.optimisticUpdate.SignedHeader())
if replace {
h.optimisticUpdate, h.hasOptimisticUpdate = update, true
h.changeCounter++
}
return replace, err
}
// ValidateFinality validates the given finality update. If the update is
// successfully validated and it is better than the old validated update (higher
// slot or same slot and more signers) then ValidatedFinality is updated.
// The boolean return flag signals if ValidatedFinality has been changed.
func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
h.lock.Lock()
defer h.lock.Unlock()
if err := update.Validate(); err != nil {
return false, err
}
replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
if replace {
h.finalityUpdate, h.hasFinalityUpdate = update, true
h.changeCounter++
}
return replace, err
}
func (h *HeadTracker) validate(head, oldHead types.SignedHeader) (bool, error) {
signerCount := head.Signature.SignerCount()
if signerCount < h.minSignerCount {
return false, errors.New("low signer count")
}
if head.Header.Slot < oldHead.Header.Slot || (head.Header.Slot == oldHead.Header.Slot && signerCount <= oldHead.Signature.SignerCount()) {
return false, nil
}
sigOk, age, err := h.committeeChain.VerifySignedHeader(head)
if err != nil {
return false, err
}
if age < 0 {
log.Warn("Future signed head received", "age", age)
}
if age > time.Minute*2 {
log.Warn("Old signed head received", "age", age)
}
if !sigOk {
return false, errors.New("invalid header signature")
}
return true, nil
}
// PrefetchHead returns the latest known prefetch head's head info.
// This head can be used to start fetching related data hoping that it will be
// validated soon.
// Note that the prefetch head cannot be validated cryptographically so it should
// only be used as a performance optimization hint.
func (h *HeadTracker) PrefetchHead() types.HeadInfo {
h.lock.RLock()
defer h.lock.RUnlock()
return h.prefetchHead
}
// SetPrefetchHead sets the prefetch head info.
// Note that HeadTracker does not verify the prefetch head, just acts as a thread
// safe bulletin board.
func (h *HeadTracker) SetPrefetchHead(head types.HeadInfo) {
h.lock.Lock()
defer h.lock.Unlock()
if head == h.prefetchHead {
return
}
h.prefetchHead = head
h.changeCounter++
}
// ChangeCounter implements request.targetData
func (h *HeadTracker) ChangeCounter() uint64 {
h.lock.RLock()
defer h.lock.RUnlock()
return h.changeCounter
}
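// Illustrative usage sketch (not part of the original file): the tracker is
// typically fed from the beacon API event callbacks; committeeChain and update
// are assumed variables, and the signer count of 10 is an arbitrary example.
//
//	headTracker := NewHeadTracker(committeeChain, 10)
//	if changed, err := headTracker.ValidateOptimistic(update); err != nil {
//		log.Debug("Optimistic update rejected", "error", err)
//	} else if changed {
//		log.Debug("New validated optimistic head", "slot", update.Attested.Slot)
//	}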

@ -0,0 +1,403 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package request
import (
"sync"
"github.com/ethereum/go-ethereum/log"
)
// Module represents a mechanism which is typically responsible for downloading
// and updating a passive data structure. It does not directly interact with the
// servers. It can start requests using the Requester interface, maintain its
// internal state by receiving and processing Events and update its target data
// structure based on the obtained data.
// It is the Scheduler's responsibility to feed events to the modules, call
// Process as long as there might be something to process and then generate request
// candidates using MakeRequest and start the best possible requests.
// Modules are called by Scheduler whenever a global trigger is fired. All events
// fire the trigger. Changing a target data structure also triggers a next
// processing round as it could make further actions possible either by the same
// or another Module.
type Module interface {
// Process is a non-blocking function responsible for starting requests,
// processing events and updating the target data structure(s) and the
// internal state of the module. Module state typically consists of information
// about pending requests and registered servers.
// Process is always called after an event is received or after a target data
// structure has been changed.
//
// Note: Process functions of different modules are never called concurrently;
// they are called by Scheduler in the same order of priority as they were
// registered in.
Process(Requester, []Event)
}
// Requester allows Modules to obtain the list of momentarily available servers,
// start new requests and report server failure when a response has been proven
// to be invalid in the processing phase.
// Note that all Requester functions should be safe to call from Module.Process.
type Requester interface {
CanSendTo() []Server
Send(Server, Request) ID
Fail(Server, string)
}
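// Illustrative sketch (not part of the original file): a minimal Module that
// keeps at most one request in flight and reacts to request events. The request
// payload and field names are made up for the example.
//
//	type exampleModule struct{ pending bool }
//
//	func (m *exampleModule) Process(requester Requester, events []Event) {
//		for _, ev := range events {
//			if ev.Type == EvResponse || ev.Type == EvFail {
//				m.pending = false // request concluded either way
//			}
//		}
//		if !m.pending {
//			if servers := requester.CanSendTo(); len(servers) > 0 {
//				requester.Send(servers[0], "example request") // Request is declared as any
//				m.pending = true
//			}
//		}
//	}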
// Scheduler is a modular network data retrieval framework that coordinates multiple
// servers and retrieval mechanisms (modules). It implements a trigger mechanism
// that calls the Process function of registered modules whenever either the state
// of existing data structures or events coming from registered servers could
// allow new operations.
type Scheduler struct {
lock sync.Mutex
modules []Module // first has the highest priority
names map[Module]string
servers map[server]struct{}
targets map[targetData]uint64
requesterLock sync.RWMutex
serverOrder []server
pending map[ServerAndID]pendingRequest
// eventLock guards access to the events list. Note that eventLock can be
// locked either while lock is locked or unlocked but lock cannot be locked
// while eventLock is locked.
eventLock sync.Mutex
events []Event
stopCh chan chan struct{}
triggerCh chan struct{} // restarts waiting sync loop
// if trigger has already been fired then send to testWaitCh blocks until
// the triggered processing round is finished
testWaitCh chan struct{}
}
type (
// Server identifies a server without allowing any direct interaction.
// Note: server interface is used by Scheduler and Tracker but not used by
// the modules that do not interact with them directly.
// In order to make module testing easier, Server interface is used in
// events and modules.
Server interface {
Name() string
}
Request any
Response any
ID uint64
ServerAndID struct {
Server Server
ID ID
}
)
// targetData represents a registered target data structure that increases its
// ChangeCounter whenever it has been changed.
type targetData interface {
ChangeCounter() uint64
}
// pendingRequest keeps track of sent and not yet finalized requests and their
// sender modules.
type pendingRequest struct {
request Request
module Module
}
// NewScheduler creates a new Scheduler.
func NewScheduler() *Scheduler {
s := &Scheduler{
servers: make(map[server]struct{}),
names: make(map[Module]string),
pending: make(map[ServerAndID]pendingRequest),
targets: make(map[targetData]uint64),
stopCh: make(chan chan struct{}),
// Note: testWaitCh should not have capacity in order to ensure
// that after a trigger happens testWaitCh will block until the resulting
// processing round has been finished
triggerCh: make(chan struct{}, 1),
testWaitCh: make(chan struct{}),
}
return s
}
// RegisterTarget registers a target data structure, ensuring that any changes
// made to it trigger a new round of Module.Process calls, giving a chance to
// modules to react to the changes.
func (s *Scheduler) RegisterTarget(t targetData) {
s.lock.Lock()
defer s.lock.Unlock()
s.targets[t] = 0
}
// RegisterModule registers a module. Should be called before starting the scheduler.
// In each processing round the order of module processing depends on the order of
// registration.
func (s *Scheduler) RegisterModule(m Module, name string) {
s.lock.Lock()
defer s.lock.Unlock()
s.modules = append(s.modules, m)
s.names[m] = name
}
// RegisterServer registers a new server.
func (s *Scheduler) RegisterServer(server server) {
s.lock.Lock()
defer s.lock.Unlock()
s.addEvent(Event{Type: EvRegistered, Server: server})
server.subscribe(func(event Event) {
event.Server = server
s.addEvent(event)
})
}
// UnregisterServer removes a registered server.
func (s *Scheduler) UnregisterServer(server server) {
s.lock.Lock()
defer s.lock.Unlock()
server.unsubscribe()
s.addEvent(Event{Type: EvUnregistered, Server: server})
}
// Start starts the scheduler. It should be called after registering all modules
// and before registering any servers.
func (s *Scheduler) Start() {
go s.syncLoop()
}
// Stop stops the scheduler.
func (s *Scheduler) Stop() {
stop := make(chan struct{})
s.stopCh <- stop
<-stop
s.lock.Lock()
for server := range s.servers {
server.unsubscribe()
}
s.servers = nil
s.lock.Unlock()
}
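// Illustrative wiring sketch (not part of the original file): modules and
// targets are registered first, then the scheduler is started and servers are
// registered; headTracker and apiServer are assumed variables, and
// exampleModule refers to the sketch above.
//
//	scheduler := NewScheduler()
//	scheduler.RegisterTarget(headTracker) // anything implementing ChangeCounter() uint64
//	scheduler.RegisterModule(&exampleModule{}, "example")
//	scheduler.Start()
//	scheduler.RegisterServer(NewServer(apiServer, &mclock.System{}))
//	defer scheduler.Stop()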
// syncLoop is the main event loop responsible for event/data processing and
// sending new requests.
// A round of processing starts whenever the global trigger is fired. Triggers
// fired during a processing round ensure that there is going to be a next round.
func (s *Scheduler) syncLoop() {
for {
s.lock.Lock()
s.processRound()
s.lock.Unlock()
loop:
for {
select {
case stop := <-s.stopCh:
close(stop)
return
case <-s.triggerCh:
break loop
case <-s.testWaitCh:
}
}
}
}
// targetChanged returns true if a registered target data structure has been
// changed since the last call to this function.
func (s *Scheduler) targetChanged() (changed bool) {
for target, counter := range s.targets {
if newCounter := target.ChangeCounter(); newCounter != counter {
s.targets[target] = newCounter
changed = true
}
}
return
}
// processRound runs an entire processing round. It calls the Process functions
// of all modules, passing all relevant events and repeating Process calls as
// long as any changes have been made to the registered target data structures.
// Once all events have been processed and a stable state has been achieved,
// requests are generated and sent if necessary and possible.
func (s *Scheduler) processRound() {
for {
log.Trace("Processing modules")
filteredEvents := s.filterEvents()
for _, module := range s.modules {
log.Trace("Processing module", "name", s.names[module], "events", len(filteredEvents[module]))
module.Process(requester{s, module}, filteredEvents[module])
}
if !s.targetChanged() {
break
}
}
}
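// Worked example (illustrative): if module A's Process inserts a new committee
// into its target chain, the chain's ChangeCounter increases, targetChanged()
// returns true and the loop runs another full round; module B can then act on
// the new committee in the same processing sequence without waiting for an
// external event or trigger.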
// Trigger starts a new processing round. If fired during processing, it ensures
// another full round of processing all modules.
func (s *Scheduler) Trigger() {
select {
case s.triggerCh <- struct{}{}:
default:
}
}
// addEvent adds an event to be processed in the next round. Note that it can be
// called regardless of the state of the lock mutex, making it safe for use in
// the server event callback.
func (s *Scheduler) addEvent(event Event) {
s.eventLock.Lock()
s.events = append(s.events, event)
s.eventLock.Unlock()
s.Trigger()
}
// filterEvents sorts each Event either as a request event or a server event,
// depending on its type. Request events are also sorted in a map based on the
// module that originally initiated the request. It also ensures that no events
// related to a server are returned before EvRegistered or after EvUnregistered.
// In case of an EvUnregistered server event it also closes all pending requests
// to the given server by adding a failed request event (EvFail), ensuring that
// all requests get finalized and thereby allowing the module logic to be safe
// and simple.
func (s *Scheduler) filterEvents() map[Module][]Event {
s.eventLock.Lock()
events := s.events
s.events = nil
s.eventLock.Unlock()
s.requesterLock.Lock()
defer s.requesterLock.Unlock()
filteredEvents := make(map[Module][]Event)
for _, event := range events {
server := event.Server.(server)
if _, ok := s.servers[server]; !ok && event.Type != EvRegistered {
continue // before EvRegistered or after EvUnregistered, discard
}
if event.IsRequestEvent() {
sid, _, _ := event.RequestInfo()
pending, ok := s.pending[sid]
if !ok {
continue // request already closed, ignore further events
}
if event.Type == EvResponse || event.Type == EvFail {
delete(s.pending, sid) // final event, close pending request
}
filteredEvents[pending.module] = append(filteredEvents[pending.module], event)
} else {
switch event.Type {
case EvRegistered:
s.servers[server] = struct{}{}
s.serverOrder = append(s.serverOrder, nil)
copy(s.serverOrder[1:], s.serverOrder[:len(s.serverOrder)-1])
s.serverOrder[0] = server
case EvUnregistered:
s.closePending(event.Server, filteredEvents)
delete(s.servers, server)
for i, srv := range s.serverOrder {
if srv == server {
copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
s.serverOrder = s.serverOrder[:len(s.serverOrder)-1]
break
}
}
}
for _, module := range s.modules {
filteredEvents[module] = append(filteredEvents[module], event)
}
}
}
return filteredEvents
}
// closePending closes all pending requests to the given server and adds an EvFail
// event to properly finalize them
func (s *Scheduler) closePending(server Server, filteredEvents map[Module][]Event) {
for sid, pending := range s.pending {
if sid.Server == server {
filteredEvents[pending.module] = append(filteredEvents[pending.module], Event{
Type: EvFail,
Server: server,
Data: RequestResponse{
ID: sid.ID,
Request: pending.request,
},
})
delete(s.pending, sid)
}
}
}
// requester implements Requester. Note that while requester basically wraps
// Scheduler (with the added information of the currently processed Module), all
// functions are safe to call from Module.Process which is running while
// the Scheduler.lock mutex is held.
type requester struct {
*Scheduler
module Module
}
// CanSendTo returns the list of currently available servers. It also returns
// them in an order of least to most recently used, ensuring a round-robin usage
// of suitable servers if the module always chooses the first suitable one.
func (s requester) CanSendTo() []Server {
s.requesterLock.RLock()
defer s.requesterLock.RUnlock()
list := make([]Server, 0, len(s.serverOrder))
for _, server := range s.serverOrder {
if server.canRequestNow() {
list = append(list, server)
}
}
return list
}
// Send sends a request and adds an entry to Scheduler.pending map, ensuring that
// related request events will be delivered to the sender Module.
func (s requester) Send(srv Server, req Request) ID {
s.requesterLock.Lock()
defer s.requesterLock.Unlock()
server := srv.(server)
id := server.sendRequest(req)
sid := ServerAndID{Server: srv, ID: id}
s.pending[sid] = pendingRequest{request: req, module: s.module}
for i, ss := range s.serverOrder {
if ss == server {
copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
s.serverOrder[len(s.serverOrder)-1] = server
return id
}
}
log.Error("Target server not found in ordered list of registered servers")
return id
}
// Fail should be called when a server delivers invalid or useless information.
// Calling Fail disables the given server for a period that is initially short
// but is exponentially growing if it happens frequently. This results in a
// somewhat fault tolerant operation that avoids hammering servers with requests
// that they cannot serve but still gives them a chance periodically.
func (s requester) Fail(srv Server, desc string) {
srv.(server).fail(desc)
}

@ -0,0 +1,126 @@
package request
import (
"reflect"
"testing"
)
func TestEventFilter(t *testing.T) {
s := NewScheduler()
module1 := &testModule{name: "module1"}
module2 := &testModule{name: "module2"}
s.RegisterModule(module1, "module1")
s.RegisterModule(module2, "module2")
s.Start()
// startup process round without events
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
srv := &testServer{}
// register server; both modules should receive server event
s.RegisterServer(srv)
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
{Type: EvRegistered, Server: srv},
})
module2.expProcess(t, []Event{
{Type: EvRegistered, Server: srv},
})
// let module1 send a request
srv.canRequest = 1
module1.sendReq = testRequest
s.Trigger()
// in first triggered round module1 sends the request, no events yet
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
// server emits EvTimeout; only module1 should receive it
srv.eventCb(Event{Type: EvTimeout, Data: RequestResponse{ID: 1, Request: testRequest}})
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
{Type: EvTimeout, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
})
module2.expProcess(t, nil)
// unregister server; both modules should receive server event
s.UnregisterServer(srv)
s.testWaitCh <- struct{}{}
module1.expProcess(t, []Event{
// module1 should also receive EvFail on its pending request
{Type: EvFail, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
{Type: EvUnregistered, Server: srv},
})
module2.expProcess(t, []Event{
{Type: EvUnregistered, Server: srv},
})
// response after server unregistered; should be discarded
srv.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
s.testWaitCh <- struct{}{}
module1.expProcess(t, nil)
module2.expProcess(t, nil)
// no more process rounds expected; shut down
s.testWaitCh <- struct{}{}
module1.expNoMoreProcess(t)
module2.expNoMoreProcess(t)
s.Stop()
}
type testServer struct {
eventCb func(Event)
lastID ID
canRequest int
}
func (s *testServer) Name() string {
return ""
}
func (s *testServer) subscribe(eventCb func(Event)) {
s.eventCb = eventCb
}
func (s *testServer) canRequestNow() bool {
return s.canRequest > 0
}
func (s *testServer) sendRequest(req Request) ID {
s.canRequest--
s.lastID++
return s.lastID
}
func (s *testServer) fail(string) {}
func (s *testServer) unsubscribe() {}
type testModule struct {
name string
processed [][]Event
sendReq Request
}
func (m *testModule) Process(requester Requester, events []Event) {
m.processed = append(m.processed, events)
if m.sendReq != nil {
if cs := requester.CanSendTo(); len(cs) > 0 {
requester.Send(cs[0], m.sendReq)
}
}
}
func (m *testModule) expProcess(t *testing.T, expEvents []Event) {
if len(m.processed) == 0 {
t.Errorf("Missing call to %s.Process", m.name)
return
}
events := m.processed[0]
m.processed = m.processed[1:]
if !reflect.DeepEqual(events, expEvents) {
t.Errorf("Call to %s.Process with wrong events (expected %v, got %v)", m.name, expEvents, events)
}
}
func (m *testModule) expNoMoreProcess(t *testing.T) {
for len(m.processed) > 0 {
t.Errorf("Unexpected call to %s.Process with events %v", m.name, m.processed[0])
m.processed = m.processed[1:]
}
}

@ -0,0 +1,451 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package request
import (
"math"
"sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
)
var (
// request events
EvResponse = &EventType{Name: "response", requestEvent: true} // data: RequestResponse; sent by requestServer
EvFail = &EventType{Name: "fail", requestEvent: true} // data: RequestResponse; sent by requestServer
EvTimeout = &EventType{Name: "timeout", requestEvent: true} // data: RequestResponse; sent by serverWithTimeout
// server events
EvRegistered = &EventType{Name: "registered"} // data: nil; sent by Scheduler
EvUnregistered = &EventType{Name: "unregistered"} // data: nil; sent by Scheduler
EvCanRequestAgain = &EventType{Name: "canRequestAgain"} // data: nil; sent by serverWithLimits
)
const (
softRequestTimeout = time.Second // allow resending request to a different server but do not cancel yet
hardRequestTimeout = time.Second * 10 // cancel request
)
const (
// serverWithLimits parameters
parallelAdjustUp = 0.1 // adjust parallelLimit up in case of success under full load
parallelAdjustDown = 1 // adjust parallelLimit down in case of timeout/failure
minParallelLimit = 1 // parallelLimit lower bound
defaultParallelLimit = 3 // parallelLimit initial value
minFailureDelay = time.Millisecond * 100 // minimum disable time in case of request failure
maxFailureDelay = time.Minute // maximum disable time in case of request failure
maxServerEventBuffer = 5 // server event allowance buffer limit
maxServerEventRate = time.Second // server event allowance buffer recharge rate
)
// requestServer can send requests in a non-blocking way and feed back events
// through the event callback. After each request it should send back either
// EvResponse or EvFail. Additionally, it may also send application-defined
// events that the Modules can interpret.
type requestServer interface {
Name() string
Subscribe(eventCallback func(Event))
SendRequest(ID, Request)
Unsubscribe()
}
// server is implemented by a requestServer wrapped into serverWithTimeout and
// serverWithLimits and is used by Scheduler.
// In addition to requestServer functionality, server can also handle timeouts,
// limit the number of parallel in-flight requests and temporarily disable
// new requests based on timeouts and response failures.
type server interface {
Server
subscribe(eventCallback func(Event))
canRequestNow() bool
sendRequest(Request) ID
fail(string)
unsubscribe()
}
// NewServer wraps a requestServer and returns a server
func NewServer(rs requestServer, clock mclock.Clock) server {
s := &serverWithLimits{}
s.parent = rs
s.serverWithTimeout.init(clock)
s.init()
return s
}
// EventType identifies an event type, either related to a request or the server
// in general. Server events can also be externally defined.
type EventType struct {
Name string
requestEvent bool // all request events are pre-defined in request package
}
// Event describes an event where the type of Data depends on Type.
// Server field is not required when sent through the event callback; it is filled
// out when processed by the Scheduler. Note that the Scheduler can also create
// and send events (EvRegistered, EvUnregistered) directly.
type Event struct {
Type *EventType
Server Server // filled by Scheduler
Data any
}
// IsRequestEvent returns true if the event is a request event
func (e *Event) IsRequestEvent() bool {
return e.Type.requestEvent
}
// RequestInfo assumes that the event is a request event and returns its contents
// in a convenient form.
func (e *Event) RequestInfo() (ServerAndID, Request, Response) {
data := e.Data.(RequestResponse)
return ServerAndID{Server: e.Server, ID: data.ID}, data.Request, data.Response
}
// RequestResponse is the Data type of request events.
type RequestResponse struct {
ID ID
Request Request
Response Response
}
// serverWithTimeout wraps a requestServer and introduces timeouts.
// The request's lifecycle is concluded when EvResponse or EvFail is emitted by
// the parent requestServer. If this does not happen before softRequestTimeout
// then EvTimeout is emitted, after which the final EvResponse or EvFail is
// still guaranteed to follow.
// If the parent fails to send this final event for hardRequestTimeout then
// serverWithTimeout emits EvFail and discards any further events from the
// parent related to the given request.
type serverWithTimeout struct {
parent requestServer
lock sync.Mutex
clock mclock.Clock
childEventCb func(event Event)
timeouts map[ID]mclock.Timer
lastID ID
}
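// Timeline example (illustrative): with softRequestTimeout = 1s and
// hardRequestTimeout = 10s, a request that never receives a parent event yields
// EvTimeout at t=1s and EvFail at t=10s; a request answered at t=3s yields
// EvTimeout at t=1s followed by the parent's EvResponse at t=3s, and any later
// parent events for that request ID are discarded.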
// Name implements request.Server
func (s *serverWithTimeout) Name() string {
return s.parent.Name()
}
// init initializes serverWithTimeout
func (s *serverWithTimeout) init(clock mclock.Clock) {
s.clock = clock
s.timeouts = make(map[ID]mclock.Timer)
}
// subscribe subscribes to events which include parent (requestServer) events
// plus EvTimeout.
func (s *serverWithTimeout) subscribe(eventCallback func(event Event)) {
s.lock.Lock()
defer s.lock.Unlock()
s.childEventCb = eventCallback
s.parent.Subscribe(s.eventCallback)
}
// sendRequest generates a new request ID, sets up the timeout timer, then sends
// the request through the parent (requestServer).
func (s *serverWithTimeout) sendRequest(request Request) (reqId ID) {
s.lock.Lock()
s.lastID++
id := s.lastID
s.startTimeout(RequestResponse{ID: id, Request: request})
s.lock.Unlock()
s.parent.SendRequest(id, request)
return id
}
// eventCallback is called by parent (requestServer) event subscription.
func (s *serverWithTimeout) eventCallback(event Event) {
s.lock.Lock()
defer s.lock.Unlock()
switch event.Type {
case EvResponse, EvFail:
id := event.Data.(RequestResponse).ID
if timer, ok := s.timeouts[id]; ok {
// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
// call will just do nothing
timer.Stop()
delete(s.timeouts, id)
if s.childEventCb != nil {
s.childEventCb(event)
}
}
default:
if s.childEventCb != nil {
s.childEventCb(event)
}
}
}
// startTimeout starts a timeout timer for the given request.
func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
id := reqData.ID
s.timeouts[id] = s.clock.AfterFunc(softRequestTimeout, func() {
s.lock.Lock()
if _, ok := s.timeouts[id]; !ok {
s.lock.Unlock()
return
}
s.timeouts[id] = s.clock.AfterFunc(hardRequestTimeout-softRequestTimeout, func() {
s.lock.Lock()
if _, ok := s.timeouts[id]; !ok {
s.lock.Unlock()
return
}
delete(s.timeouts, id)
childEventCb := s.childEventCb
s.lock.Unlock()
if childEventCb != nil {
childEventCb(Event{Type: EvFail, Data: reqData})
}
})
childEventCb := s.childEventCb
s.lock.Unlock()
if childEventCb != nil {
childEventCb(Event{Type: EvTimeout, Data: reqData})
}
})
}
// unsubscribe stops all goroutines associated with the server.
func (s *serverWithTimeout) unsubscribe() {
s.lock.Lock()
for _, timer := range s.timeouts {
if timer != nil {
timer.Stop()
}
}
s.lock.Unlock()
s.parent.Unsubscribe()
}
// serverWithLimits wraps serverWithTimeout and implements server. It limits the
// number of parallel in-flight requests and prevents sending new requests when a
// pending one has already timed out. Server events are also rate limited.
// It also implements a failure delay mechanism that adds an exponentially growing
// delay each time a request fails (wrong answer or hard timeout). This makes the
// syncing mechanism less brittle as temporary failures of the server might happen
// sometimes, but still avoids hammering a non-functional server with requests.
type serverWithLimits struct {
serverWithTimeout
lock sync.Mutex
childEventCb func(event Event)
softTimeouts map[ID]struct{}
pendingCount, timeoutCount int
parallelLimit float32
sendEvent bool
delayTimer mclock.Timer
delayCounter int
failureDelayEnd mclock.AbsTime
failureDelay float64
serverEventBuffer int
eventBufferUpdated mclock.AbsTime
}
// init initializes serverWithLimits
func (s *serverWithLimits) init() {
s.softTimeouts = make(map[ID]struct{})
s.parallelLimit = defaultParallelLimit
s.serverEventBuffer = maxServerEventBuffer
}
// subscribe subscribes to events which include parent (serverWithTimeout) events
// plus EvCanRequestAgain.
func (s *serverWithLimits) subscribe(eventCallback func(event Event)) {
s.lock.Lock()
defer s.lock.Unlock()
s.childEventCb = eventCallback
s.serverWithTimeout.subscribe(s.eventCallback)
}
// eventCallback is called by parent (serverWithTimeout) event subscription.
func (s *serverWithLimits) eventCallback(event Event) {
s.lock.Lock()
var sendCanRequestAgain bool
passEvent := true
switch event.Type {
case EvTimeout:
id := event.Data.(RequestResponse).ID
s.softTimeouts[id] = struct{}{}
s.timeoutCount++
s.parallelLimit -= parallelAdjustDown
if s.parallelLimit < minParallelLimit {
s.parallelLimit = minParallelLimit
}
log.Debug("Server timeout", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
case EvResponse, EvFail:
id := event.Data.(RequestResponse).ID
if _, ok := s.softTimeouts[id]; ok {
delete(s.softTimeouts, id)
s.timeoutCount--
log.Debug("Server timeout finalized", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
}
if event.Type == EvResponse && s.pendingCount >= int(s.parallelLimit) {
s.parallelLimit += parallelAdjustUp
}
s.pendingCount--
if s.canRequest() {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
if event.Type == EvFail {
s.failLocked("failed request")
}
default:
// server event; check rate limit
if s.serverEventBuffer < maxServerEventBuffer {
now := s.clock.Now()
sinceUpdate := time.Duration(now - s.eventBufferUpdated)
if sinceUpdate >= maxServerEventRate*time.Duration(maxServerEventBuffer-s.serverEventBuffer) {
s.serverEventBuffer = maxServerEventBuffer
s.eventBufferUpdated = now
} else {
addBuffer := int(sinceUpdate / maxServerEventRate)
s.serverEventBuffer += addBuffer
s.eventBufferUpdated += mclock.AbsTime(maxServerEventRate * time.Duration(addBuffer))
}
}
if s.serverEventBuffer > 0 {
s.serverEventBuffer--
} else {
passEvent = false
}
}
childEventCb := s.childEventCb
s.lock.Unlock()
if passEvent && childEventCb != nil {
childEventCb(event)
}
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
}
// sendRequest sends a request through the parent (serverWithTimeout).
func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
s.lock.Lock()
s.pendingCount++
s.lock.Unlock()
return s.serverWithTimeout.sendRequest(request)
}
// unsubscribe stops all goroutines associated with the server.
func (s *serverWithLimits) unsubscribe() {
s.lock.Lock()
if s.delayTimer != nil {
s.delayTimer.Stop()
s.delayTimer = nil
}
s.childEventCb = nil
s.lock.Unlock()
s.serverWithTimeout.unsubscribe()
}
// canRequest checks whether a new request can be started.
func (s *serverWithLimits) canRequest() bool {
if s.delayTimer != nil || s.pendingCount >= int(s.parallelLimit) || s.timeoutCount > 0 {
return false
}
if s.parallelLimit < minParallelLimit {
s.parallelLimit = minParallelLimit
}
return true
}
// canRequestNow checks whether a new request can be started, according to the
// current in-flight request count and parallelLimit, and also the failure delay
// timer.
// If it returns false then it is guaranteed that an EvCanRequestAgain will be
// sent whenever the server becomes available for requesting again.
func (s *serverWithLimits) canRequestNow() bool {
var sendCanRequestAgain bool
s.lock.Lock()
canRequest := s.canRequest()
if canRequest {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
childEventCb := s.childEventCb
s.lock.Unlock()
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
return canRequest
}
// delay sets the delay timer to the given duration, disabling new requests for
// the given period.
func (s *serverWithLimits) delay(delay time.Duration) {
if s.delayTimer != nil {
// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
// call will just do nothing
s.delayTimer.Stop()
s.delayTimer = nil
}
s.delayCounter++
delayCounter := s.delayCounter
log.Debug("Server delay started", "length", delay)
s.delayTimer = s.clock.AfterFunc(delay, func() {
log.Debug("Server delay ended", "length", delay)
var sendCanRequestAgain bool
s.lock.Lock()
if s.delayTimer != nil && s.delayCounter == delayCounter { // do nothing if there is a new timer now
s.delayTimer = nil
if s.canRequest() {
sendCanRequestAgain = s.sendEvent
s.sendEvent = false
}
}
childEventCb := s.childEventCb
s.lock.Unlock()
if sendCanRequestAgain && childEventCb != nil {
childEventCb(Event{Type: EvCanRequestAgain})
}
})
}
// fail reports that a response from the server was found invalid by the processing
// Module, disabling new requests for a dynamically adjusted time period.
func (s *serverWithLimits) fail(desc string) {
s.lock.Lock()
defer s.lock.Unlock()
s.failLocked(desc)
}
// failLocked calculates the dynamic failure delay and applies it.
func (s *serverWithLimits) failLocked(desc string) {
log.Debug("Server error", "description", desc)
s.failureDelay *= 2
now := s.clock.Now()
if now > s.failureDelayEnd {
s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
}
if s.failureDelay < float64(minFailureDelay) {
s.failureDelay = float64(minFailureDelay)
}
s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
s.delay(time.Duration(s.failureDelay))
}
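// Worked example (illustrative, using minFailureDelay = 100ms): starting from
// an idle server, the first failure disables requests for ~100ms, a second
// failure soon after for ~200ms, a third for ~400ms and so on, capped by
// maxFailureDelay. At the next failure the accumulated delay is scaled down by
// a factor of 2 for every maxFailureDelay elapsed since failureDelayEnd, so a
// long-idle server starts again from close to the minimum delay.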

@ -0,0 +1,182 @@
package request
import (
"testing"
"github.com/ethereum/go-ethereum/common/mclock"
)
const (
testRequest = "Life, the Universe, and Everything"
testResponse = 42
)
var testEventType = &EventType{Name: "testEvent"}
func TestServerEvents(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var lastEventType *EventType
srv.subscribe(func(event Event) { lastEventType = event.Type })
evTypeName := func(evType *EventType) string {
if evType == nil {
return "none"
}
return evType.Name
}
expEvent := func(expType *EventType) {
if lastEventType != expType {
t.Errorf("Wrong event type (expected %s, got %s)", evTypeName(expType), evTypeName(lastEventType))
}
lastEventType = nil
}
// user events should simply be passed through
rs.eventCb(Event{Type: testEventType})
expEvent(testEventType)
// send request, soft timeout, then valid response
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
clock.Run(softRequestTimeout)
expEvent(EvTimeout)
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expEvent(EvResponse)
// send request, hard timeout (response after hard timeout should be ignored)
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
clock.Run(softRequestTimeout)
expEvent(EvTimeout)
clock.WaitForTimers(1)
clock.Run(hardRequestTimeout)
expEvent(EvFail)
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expEvent(nil)
srv.unsubscribe()
}
func TestServerParallel(t *testing.T) {
rs := &testRequestServer{}
srv := NewServer(rs, &mclock.Simulated{})
srv.subscribe(func(event Event) {})
expSend := func(expSent int) {
var sent int
for sent <= expSent {
if !srv.canRequestNow() {
break
}
sent++
srv.sendRequest(testRequest)
}
if sent != expSent {
t.Errorf("Wrong number of parallel requests accepted (expected %d, got %d)", expSent, sent)
}
}
// max out parallel allowance
expSend(defaultParallelLimit)
// 1 answered, should accept 1 more
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expSend(1)
// 2 answered, should accept 2 more
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 2, Request: testRequest, Response: testResponse}})
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 3, Request: testRequest, Response: testResponse}})
expSend(2)
// failed request, should decrease allowance and not accept more
rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 4, Request: testRequest}})
expSend(0)
srv.unsubscribe()
}
func TestServerFail(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
srv.subscribe(func(event Event) {})
expCanRequest := func(expCanRequest bool) {
if canRequest := srv.canRequestNow(); canRequest != expCanRequest {
t.Errorf("Wrong result for canRequestNow (expected %v, got %v)", expCanRequest, canRequest)
}
}
// timed out request
expCanRequest(true)
srv.sendRequest(testRequest)
clock.WaitForTimers(1)
expCanRequest(true)
clock.Run(softRequestTimeout)
expCanRequest(false) // cannot request when there is a timed out request
rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
expCanRequest(true)
// explicit server.Fail
srv.fail("")
clock.WaitForTimers(1)
expCanRequest(false) // cannot request for a while after a failure
clock.Run(minFailureDelay)
expCanRequest(true)
// request returned with EvFail
srv.sendRequest(testRequest)
rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 2, Request: testRequest}})
clock.WaitForTimers(1)
expCanRequest(false) // EvFail should also start failure delay
clock.Run(minFailureDelay)
expCanRequest(false) // second failure delay is longer, should still be disabled
clock.Run(minFailureDelay)
expCanRequest(true)
srv.unsubscribe()
}
func TestServerEventRateLimit(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var eventCount int
srv.subscribe(func(event Event) {
eventCount++
})
expEvents := func(send, expAllowed int) {
eventCount = 0
for sent := 0; sent < send; sent++ {
rs.eventCb(Event{Type: testEventType})
}
if eventCount != expAllowed {
t.Errorf("Wrong number of server events passing rate limitation (sent %d, expected %d, got %d)", send, expAllowed, eventCount)
}
}
expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
clock.Run(maxServerEventRate)
expEvents(5, 1)
clock.Run(maxServerEventRate * maxServerEventBuffer * 2)
expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
srv.unsubscribe()
}
func TestServerUnsubscribe(t *testing.T) {
rs := &testRequestServer{}
clock := &mclock.Simulated{}
srv := NewServer(rs, clock)
var eventCount int
srv.subscribe(func(event Event) {
eventCount++
})
eventCb := rs.eventCb
eventCb(Event{Type: testEventType})
if eventCount != 1 {
t.Errorf("Server event callback not called before unsubscribe")
}
srv.unsubscribe()
if rs.eventCb != nil {
t.Errorf("Server event callback not removed after unsubscribe")
}
eventCb(Event{Type: testEventType})
if eventCount != 1 {
t.Errorf("Server event callback called after unsubscribe")
}
}
type testRequestServer struct {
eventCb func(Event)
}
func (rs *testRequestServer) Name() string { return "" }
func (rs *testRequestServer) Subscribe(eventCb func(Event)) { rs.eventCb = eventCb }
func (rs *testRequestServer) SendRequest(ID, Request) {}
func (rs *testRequestServer) Unsubscribe() { rs.eventCb = nil }

@ -0,0 +1,202 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/log"
)
type headTracker interface {
ValidateOptimistic(update types.OptimisticUpdate) (bool, error)
ValidateFinality(head types.FinalityUpdate) (bool, error)
ValidatedFinality() (types.FinalityUpdate, bool)
SetPrefetchHead(head types.HeadInfo)
}
// HeadSync implements request.Module; it updates the validated and prefetch
// heads of HeadTracker based on the EvNewHead, EvNewOptimisticUpdate and
// EvNewFinalityUpdate events coming from registered servers.
// It can also postpone the validation of the latest announced signed head
// until the committee chain is synced up to at least the required period.
type HeadSync struct {
headTracker headTracker
chain committeeChain
nextSyncPeriod uint64
chainInit bool
unvalidatedOptimistic map[request.Server]types.OptimisticUpdate
unvalidatedFinality map[request.Server]types.FinalityUpdate
serverHeads map[request.Server]types.HeadInfo
reqFinalityEpoch map[request.Server]uint64 // next epoch to request finality update
headServerCount map[types.HeadInfo]headServerCount
headCounter uint64
prefetchHead types.HeadInfo
}
// headServerCount is kept for the most recently seen head infos; it stores the
// number of servers that currently announce the given head info as their head,
// together with a counter signaling how recent that head is.
// This data is used for selecting the prefetch head.
type headServerCount struct {
serverCount int
headCounter uint64
}
// NewHeadSync creates a new HeadSync.
func NewHeadSync(headTracker headTracker, chain committeeChain) *HeadSync {
s := &HeadSync{
headTracker: headTracker,
chain: chain,
unvalidatedOptimistic: make(map[request.Server]types.OptimisticUpdate),
unvalidatedFinality: make(map[request.Server]types.FinalityUpdate),
serverHeads: make(map[request.Server]types.HeadInfo),
headServerCount: make(map[types.HeadInfo]headServerCount),
reqFinalityEpoch: make(map[request.Server]uint64),
}
return s
}
// Process implements request.Module.
func (s *HeadSync) Process(requester request.Requester, events []request.Event) {
nextPeriod, chainInit := s.chain.NextSyncPeriod()
if nextPeriod != s.nextSyncPeriod || chainInit != s.chainInit {
s.nextSyncPeriod, s.chainInit = nextPeriod, chainInit
s.processUnvalidatedUpdates()
}
for _, event := range events {
switch event.Type {
case EvNewHead:
s.setServerHead(event.Server, event.Data.(types.HeadInfo))
case EvNewOptimisticUpdate:
update := event.Data.(types.OptimisticUpdate)
s.newOptimisticUpdate(event.Server, update)
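// Request a finality update unless we have already requested one for this
// attested epoch from this server, or a validated finality update already
// covers it; reqFinalityEpoch throttles the request to at most one per epoch
// and server.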
epoch := update.Attested.Epoch()
if epoch < s.reqFinalityEpoch[event.Server] {
continue
}
if finality, ok := s.headTracker.ValidatedFinality(); ok && finality.Attested.Header.Epoch() >= epoch {
continue
}
requester.Send(event.Server, ReqFinality{})
s.reqFinalityEpoch[event.Server] = epoch + 1
case EvNewFinalityUpdate:
s.newFinalityUpdate(event.Server, event.Data.(types.FinalityUpdate))
case request.EvResponse:
_, _, resp := event.RequestInfo()
s.newFinalityUpdate(event.Server, resp.(types.FinalityUpdate))
case request.EvUnregistered:
s.setServerHead(event.Server, types.HeadInfo{})
delete(s.serverHeads, event.Server)
delete(s.unvalidatedOptimistic, event.Server)
delete(s.unvalidatedFinality, event.Server)
}
}
}
// newOptimisticUpdate handles a received optimistic update; it either validates
// the update immediately if the chain is properly synced or stores it for later validation.
func (s *HeadSync) newOptimisticUpdate(server request.Server, optimisticUpdate types.OptimisticUpdate) {
if !s.chainInit || types.SyncPeriod(optimisticUpdate.SignatureSlot) > s.nextSyncPeriod {
s.unvalidatedOptimistic[server] = optimisticUpdate
return
}
if _, err := s.headTracker.ValidateOptimistic(optimisticUpdate); err != nil {
log.Debug("Error validating optimistic update", "error", err)
}
}
// newFinalityUpdate handles a received finality update; it either validates the
// update immediately if the chain is properly synced or stores it for later validation.
func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
s.unvalidatedFinality[server] = finalityUpdate
return
}
if _, err := s.headTracker.ValidateFinality(finalityUpdate); err != nil {
log.Debug("Error validating finality update", "error", err)
}
}
// processUnvalidatedUpdates iterates the stored unvalidated updates and
// validates those that can now be validated.
func (s *HeadSync) processUnvalidatedUpdates() {
if !s.chainInit {
return
}
for server, optimisticUpdate := range s.unvalidatedOptimistic {
if types.SyncPeriod(optimisticUpdate.SignatureSlot) <= s.nextSyncPeriod {
if _, err := s.headTracker.ValidateOptimistic(optimisticUpdate); err != nil {
log.Debug("Error validating deferred optimistic update", "error", err)
}
delete(s.unvalidatedOptimistic, server)
}
}
for server, finalityUpdate := range s.unvalidatedFinality {
if types.SyncPeriod(finalityUpdate.SignatureSlot) <= s.nextSyncPeriod {
if _, err := s.headTracker.ValidateFinality(finalityUpdate); err != nil {
log.Debug("Error validating deferred finality update", "error", err)
}
delete(s.unvalidatedFinality, server)
}
}
}
// setServerHead processes non-validated server head announcements and updates
// the prefetch head if necessary.
func (s *HeadSync) setServerHead(server request.Server, head types.HeadInfo) bool {
if oldHead, ok := s.serverHeads[server]; ok {
if head == oldHead {
return false
}
h := s.headServerCount[oldHead]
if h.serverCount--; h.serverCount > 0 {
s.headServerCount[oldHead] = h
} else {
delete(s.headServerCount, oldHead)
}
}
if head != (types.HeadInfo{}) {
h, ok := s.headServerCount[head]
if !ok {
s.headCounter++
h.headCounter = s.headCounter
}
h.serverCount++
s.headServerCount[head] = h
s.serverHeads[server] = head
} else {
delete(s.serverHeads, server)
}
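// select the new prefetch head: the head announced by the most servers wins,
// with ties broken in favour of the most recently first-announced head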
var (
bestHead types.HeadInfo
bestHeadInfo headServerCount
)
for head, headServerCount := range s.headServerCount {
if headServerCount.serverCount > bestHeadInfo.serverCount ||
(headServerCount.serverCount == bestHeadInfo.serverCount && headServerCount.headCounter > bestHeadInfo.headCounter) {
bestHead, bestHeadInfo = head, headServerCount
}
}
if bestHead == s.prefetchHead {
return false
}
s.prefetchHead = bestHead
s.headTracker.SetPrefetchHead(bestHead)
return true
}

@ -0,0 +1,183 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
)
var (
testServer1 = testServer("testServer1")
testServer2 = testServer("testServer2")
testServer3 = testServer("testServer3")
testServer4 = testServer("testServer4")
testServer5 = testServer("testServer5")
testHead0 = types.HeadInfo{}
testHead1 = types.HeadInfo{Slot: 123, BlockRoot: common.Hash{1}}
testHead2 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{2}}
testHead3 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{3}}
testHead4 = types.HeadInfo{Slot: 125, BlockRoot: common.Hash{4}}
testOptUpdate1 = types.OptimisticUpdate{SignatureSlot: 0x0124, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x0123, StateRoot: common.Hash{1}}}}
testOptUpdate2 = types.OptimisticUpdate{SignatureSlot: 0x2010, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x200e, StateRoot: common.Hash{2}}}}
// testOptUpdate3 is at the end of period 1 but signed in period 2
testOptUpdate3 = types.OptimisticUpdate{SignatureSlot: 0x4000, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x3fff, StateRoot: common.Hash{3}}}}
testOptUpdate4 = types.OptimisticUpdate{SignatureSlot: 0x6444, Attested: types.HeaderWithExecProof{Header: types.Header{Slot: 0x6443, StateRoot: common.Hash{4}}}}
)
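// Note (illustrative, not part of the original test data): with 8192 (0x2000)
// slots per sync period, types.SyncPeriod(0x3fff) == 1 while
// types.SyncPeriod(0x4000) == 2, which is why testOptUpdate3 is attested in
// period 1 but can only be validated once the committee chain knows period 2.

// finality derives a finality update from an optimistic one; the finalized
// header is placed two epochs (64 slots) behind the attested header and rounded
// down to an epoch boundary (a multiple of 32 slots).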
func finality(opt types.OptimisticUpdate) types.FinalityUpdate {
return types.FinalityUpdate{
SignatureSlot: opt.SignatureSlot,
Attested: opt.Attested,
Finalized: types.HeaderWithExecProof{Header: types.Header{Slot: (opt.Attested.Header.Slot - 64) & uint64(0xffffffffffffffe0)}},
}
}
type testServer string
func (t testServer) Name() string {
return string(t)
}
func TestValidatedHead(t *testing.T) {
chain := &TestCommitteeChain{}
ht := &TestHeadTracker{}
headSync := NewHeadSync(ht, chain)
ts := NewTestScheduler(t, headSync)
ht.ExpValidated(t, 0, nil)
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate1)
ts.Run(1, testServer1, ReqFinality{})
// announced head should be queued because of uninitialized chain
ht.ExpValidated(t, 1, nil)
chain.SetNextSyncPeriod(0) // initialize chain
ts.Run(2)
// expect previously queued head to be validated
ht.ExpValidated(t, 2, []types.OptimisticUpdate{testOptUpdate1})
chain.SetNextSyncPeriod(1)
ts.ServerEvent(EvNewFinalityUpdate, testServer1, finality(testOptUpdate2))
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate2)
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, testOptUpdate2)
ts.Run(3)
// expect both head announcements to be validated instantly
ht.ExpValidated(t, 3, []types.OptimisticUpdate{testOptUpdate2, testOptUpdate2})
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, testOptUpdate3)
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer3, testOptUpdate4)
// finality should be requested from both servers
ts.Run(4, testServer1, ReqFinality{}, testServer3, ReqFinality{})
// future period announced heads should be queued
ht.ExpValidated(t, 4, nil)
chain.SetNextSyncPeriod(2)
ts.Run(5)
// testOptUpdate3 can be validated now but not testOptUpdate4
ht.ExpValidated(t, 5, []types.OptimisticUpdate{testOptUpdate3})
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer4, testOptUpdate3)
// new server joined with recent optimistic update but still no finality; should be requested
ts.Run(6, testServer4, ReqFinality{})
ht.ExpValidated(t, 6, []types.OptimisticUpdate{testOptUpdate3})
ts.AddServer(testServer5, 1)
ts.RequestEvent(request.EvResponse, ts.Request(6, 1), finality(testOptUpdate3))
ts.ServerEvent(EvNewOptimisticUpdate, testServer5, testOptUpdate3)
// finality update request answered; new server should not be requested
ts.Run(7)
ht.ExpValidated(t, 7, []types.OptimisticUpdate{testOptUpdate3})
// server 3 disconnected without proving period 3, its announced head should be dropped
ts.RemoveServer(testServer3)
ts.Run(8)
ht.ExpValidated(t, 8, nil)
chain.SetNextSyncPeriod(3)
ts.Run(9)
// testOptUpdate4 could be validated now but it's not queued by any registered server
ht.ExpValidated(t, 9, nil)
ts.ServerEvent(EvNewFinalityUpdate, testServer2, finality(testOptUpdate4))
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, testOptUpdate4)
ts.Run(10)
// now testOptUpdate4 should be validated
ht.ExpValidated(t, 10, []types.OptimisticUpdate{testOptUpdate4})
}
func TestPrefetchHead(t *testing.T) {
chain := &TestCommitteeChain{}
ht := &TestHeadTracker{}
headSync := NewHeadSync(ht, chain)
ts := NewTestScheduler(t, headSync)
ht.ExpPrefetch(t, 0, testHead0) // no servers registered
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewHead, testServer1, testHead1)
ts.Run(1)
ht.ExpPrefetch(t, 1, testHead1) // s1: h1
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewHead, testServer2, testHead2)
ts.Run(2)
ht.ExpPrefetch(t, 2, testHead2) // s1: h1, s2: h2
ts.ServerEvent(EvNewHead, testServer1, testHead2)
ts.Run(3)
ht.ExpPrefetch(t, 3, testHead2) // s1: h2, s2: h2
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewHead, testServer3, testHead3)
ts.Run(4)
ht.ExpPrefetch(t, 4, testHead2) // s1: h2, s2: h2, s3: h3
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewHead, testServer4, testHead4)
ts.Run(5)
ht.ExpPrefetch(t, 5, testHead2) // s1: h2, s2: h2, s3: h3, s4: h4
ts.ServerEvent(EvNewHead, testServer2, testHead3)
ts.Run(6)
ht.ExpPrefetch(t, 6, testHead3) // s1: h2, s2: h3, s3: h3, s4: h4
ts.RemoveServer(testServer3)
ts.Run(7)
ht.ExpPrefetch(t, 7, testHead4) // s1: h2, s2: h3, s4: h4
ts.RemoveServer(testServer1)
ts.Run(8)
ht.ExpPrefetch(t, 8, testHead4) // s2: h3, s4: h4
ts.RemoveServer(testServer4)
ts.Run(9)
ht.ExpPrefetch(t, 9, testHead3) // s2: h3
ts.RemoveServer(testServer2)
ts.Run(10)
ht.ExpPrefetch(t, 10, testHead0) // no servers registered
}

@ -0,0 +1,259 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
)
type requestWithID struct {
sid request.ServerAndID
request request.Request
}
type TestScheduler struct {
t *testing.T
module request.Module
events []request.Event
servers []request.Server
allowance map[request.Server]int
sent map[int][]requestWithID
testIndex int
expFail map[request.Server]int // expected Server.Fail calls during next Run
lastId request.ID
}
func NewTestScheduler(t *testing.T, module request.Module) *TestScheduler {
return &TestScheduler{
t: t,
module: module,
allowance: make(map[request.Server]int),
expFail: make(map[request.Server]int),
sent: make(map[int][]requestWithID),
}
}
func (ts *TestScheduler) Run(testIndex int, exp ...any) {
expReqs := make([]requestWithID, len(exp)/2)
id := ts.lastId
for i := range expReqs {
id++
expReqs[i] = requestWithID{
sid: request.ServerAndID{Server: exp[i*2].(request.Server), ID: id},
request: exp[i*2+1].(request.Request),
}
}
if len(expReqs) == 0 {
expReqs = nil
}
ts.testIndex = testIndex
ts.module.Process(ts, ts.events)
ts.events = nil
for server, count := range ts.expFail {
delete(ts.expFail, server)
if count == 0 {
continue
}
ts.t.Errorf("Missing %d Server.Fail(s) from server %s in test case #%d", count, server.Name(), testIndex)
}
if !reflect.DeepEqual(ts.sent[testIndex], expReqs) {
ts.t.Errorf("Wrong sent requests in test case #%d (expected %v, got %v)", testIndex, expReqs, ts.sent[testIndex])
}
}
func (ts *TestScheduler) CanSendTo() (cs []request.Server) {
for _, server := range ts.servers {
if ts.allowance[server] > 0 {
cs = append(cs, server)
}
}
return
}
func (ts *TestScheduler) Send(server request.Server, req request.Request) request.ID {
ts.lastId++
ts.sent[ts.testIndex] = append(ts.sent[ts.testIndex], requestWithID{
sid: request.ServerAndID{Server: server, ID: ts.lastId},
request: req,
})
ts.allowance[server]--
return ts.lastId
}
func (ts *TestScheduler) Fail(server request.Server, desc string) {
if ts.expFail[server] == 0 {
ts.t.Errorf("Unexpected Fail from server %s in test case #%d: %s", server.Name(), ts.testIndex, desc)
return
}
ts.expFail[server]--
}
func (ts *TestScheduler) Request(testIndex, reqIndex int) requestWithID {
if len(ts.sent[testIndex]) < reqIndex {
ts.t.Errorf("Missing request from test case %d index %d", testIndex, reqIndex)
return requestWithID{}
}
return ts.sent[testIndex][reqIndex-1]
}
func (ts *TestScheduler) ServerEvent(evType *request.EventType, server request.Server, data any) {
ts.events = append(ts.events, request.Event{
Type: evType,
Server: server,
Data: data,
})
}
func (ts *TestScheduler) RequestEvent(evType *request.EventType, req requestWithID, resp request.Response) {
if req.request == nil {
return
}
ts.events = append(ts.events, request.Event{
Type: evType,
Server: req.sid.Server,
Data: request.RequestResponse{
ID: req.sid.ID,
Request: req.request,
Response: resp,
},
})
}
func (ts *TestScheduler) AddServer(server request.Server, allowance int) {
ts.servers = append(ts.servers, server)
ts.allowance[server] = allowance
ts.ServerEvent(request.EvRegistered, server, nil)
}
func (ts *TestScheduler) RemoveServer(server request.Server) {
for i, s := range ts.servers {
if s == server {
copy(ts.servers[i:len(ts.servers)-1], ts.servers[i+1:])
ts.servers = ts.servers[:len(ts.servers)-1]
break
}
}
delete(ts.allowance, server)
ts.ServerEvent(request.EvUnregistered, server, nil)
}
func (ts *TestScheduler) AddAllowance(server request.Server, allowance int) {
ts.allowance[server] += allowance
}
func (ts *TestScheduler) ExpFail(server request.Server) {
ts.expFail[server]++
}
type TestCommitteeChain struct {
fsp, nsp uint64
init bool
}
func (tc *TestCommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
tc.fsp, tc.nsp, tc.init = bootstrap.Header.SyncPeriod(), bootstrap.Header.SyncPeriod()+2, true
return nil
}
func (tc *TestCommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error {
period := update.AttestedHeader.Header.SyncPeriod()
if period < tc.fsp || period > tc.nsp || !tc.init {
return light.ErrInvalidPeriod
}
if period == tc.nsp {
tc.nsp++
}
return nil
}
func (tc *TestCommitteeChain) NextSyncPeriod() (uint64, bool) {
return tc.nsp, tc.init
}
func (tc *TestCommitteeChain) ExpInit(t *testing.T, expInit bool) {
if tc.init != expInit {
t.Errorf("Incorrect init flag (expected %v, got %v)", expInit, tc.init)
}
}
func (tc *TestCommitteeChain) SetNextSyncPeriod(nsp uint64) {
tc.init, tc.nsp = true, nsp
}
func (tc *TestCommitteeChain) ExpNextSyncPeriod(t *testing.T, expNsp uint64) {
tc.ExpInit(t, true)
if tc.nsp != expNsp {
t.Errorf("Incorrect NextSyncPeriod (expected %d, got %d)", expNsp, tc.nsp)
}
}
type TestHeadTracker struct {
phead types.HeadInfo
validated []types.OptimisticUpdate
finality types.FinalityUpdate
}
func (ht *TestHeadTracker) ValidateOptimistic(update types.OptimisticUpdate) (bool, error) {
ht.validated = append(ht.validated, update)
return true, nil
}
func (ht *TestHeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
ht.finality = update
return true, nil
}
func (ht *TestHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
return ht.finality, ht.finality.Attested.Header != (types.Header{})
}
func (ht *TestHeadTracker) ExpValidated(t *testing.T, tci int, expHeads []types.OptimisticUpdate) {
for i, expHead := range expHeads {
if i >= len(ht.validated) {
t.Errorf("Missing validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got none)", tci, i, expHead.Attested.Header.Slot, expHead.Attested.Header.Hash())
continue
}
if !reflect.DeepEqual(ht.validated[i], expHead) {
vhead := ht.validated[i].Attested.Header
t.Errorf("Wrong validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, i, expHead.Attested.Header.Slot, expHead.Attested.Header.Hash(), vhead.Slot, vhead.Hash())
}
}
for i := len(expHeads); i < len(ht.validated); i++ {
vhead := ht.validated[i].Attested.Header
t.Errorf("Unexpected validated head in test case #%d index #%d (expected none, got {slot %d blockRoot %x})", tci, i, vhead.Slot, vhead.Hash())
}
ht.validated = nil
}
func (ht *TestHeadTracker) SetPrefetchHead(head types.HeadInfo) {
ht.phead = head
}
func (ht *TestHeadTracker) ExpPrefetch(t *testing.T, tci int, exp types.HeadInfo) {
if ht.phead != exp {
t.Errorf("Wrong prefetch head in test case #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, exp.Slot, exp.BlockRoot, ht.phead.Slot, ht.phead.BlockRoot)
}
}

@ -0,0 +1,47 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
)
var (
EvNewHead = &request.EventType{Name: "newHead"} // data: types.HeadInfo
EvNewOptimisticUpdate = &request.EventType{Name: "newOptimisticUpdate"} // data: types.OptimisticUpdate
EvNewFinalityUpdate = &request.EventType{Name: "newFinalityUpdate"} // data: types.FinalityUpdate
)
type (
ReqUpdates struct {
FirstPeriod, Count uint64
}
RespUpdates struct {
Updates []*types.LightClientUpdate
Committees []*types.SerializedSyncCommittee
}
ReqHeader common.Hash
RespHeader struct {
Header types.Header
Canonical, Finalized bool
}
ReqCheckpointData common.Hash
ReqBeaconBlock common.Hash
ReqFinality struct{}
)
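// The sync modules in this package expect the following response types:
// ReqUpdates -> RespUpdates, ReqHeader -> RespHeader, ReqCheckpointData ->
// *types.BootstrapData and ReqFinality -> types.FinalityUpdate.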

@ -0,0 +1,398 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"sort"
"github.com/ethereum/go-ethereum/beacon/light"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/beacon/types"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
const maxUpdateRequest = 8 // maximum number of updates requested in a single request
type committeeChain interface {
CheckpointInit(bootstrap types.BootstrapData) error
InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error
NextSyncPeriod() (uint64, bool)
}
// CheckpointInit implements request.Module; it fetches the light client bootstrap
// data belonging to the given checkpoint hash and initializes the committee chain
// if successful.
type CheckpointInit struct {
chain committeeChain
checkpointHash common.Hash
locked request.ServerAndID
initialized bool
// serverState tracks the state of requesting checkpoint header info from each
// server. Part of this info (canonical and finalized status) is not validated
// and is therefore requested from each server separately, after that server
// has reported a missing checkpoint (which is also unvalidated info).
serverState map[request.Server]serverState
// the following fields are used to determine whether the checkpoint is on an
// epoch boundary. This information is validated and therefore stored globally.
parentHash common.Hash
hasEpochInfo, epochBoundary bool
cpSlot, parentSlot uint64
}
const (
ssDefault = iota // no action yet or checkpoint requested
ssNeedHeader // checkpoint req failed, need cp header
ssHeaderRequested // cp header requested
ssNeedParent // cp header slot %32 != 0, need parent to check epoch boundary
ssParentRequested // cp parent header requested
ssPrintStatus // has all necessary info, print log message if init still not successful
ssDone // log message printed, no more action required
)
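// A typical per-server flow is ssDefault -> ssNeedHeader (checkpoint request
// failed or returned invalid data) -> ssHeaderRequested -> ssNeedParent ->
// ssParentRequested -> ssPrintStatus -> ssDone; the parent request is skipped
// when the checkpoint header alone already settles the epoch boundary question.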
type serverState struct {
state int
hasHeader, canonical, finalized bool // stored per server because not validated
}
// NewCheckpointInit creates a new CheckpointInit.
func NewCheckpointInit(chain committeeChain, checkpointHash common.Hash) *CheckpointInit {
return &CheckpointInit{
chain: chain,
checkpointHash: checkpointHash,
serverState: make(map[request.Server]serverState),
}
}
// Process implements request.Module.
func (s *CheckpointInit) Process(requester request.Requester, events []request.Event) {
if s.initialized {
return
}
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, req, resp := event.RequestInfo()
if s.locked == sid {
s.locked = request.ServerAndID{}
}
if event.Type == request.EvTimeout {
continue
}
switch s.serverState[sid.Server].state {
case ssDefault:
if resp != nil {
if checkpoint := resp.(*types.BootstrapData); checkpoint.Header.Hash() == common.Hash(req.(ReqCheckpointData)) {
s.chain.CheckpointInit(*checkpoint)
s.initialized = true
return
}
requester.Fail(event.Server, "invalid checkpoint data")
}
s.serverState[sid.Server] = serverState{state: ssNeedHeader}
case ssHeaderRequested:
if resp == nil {
s.serverState[sid.Server] = serverState{state: ssPrintStatus}
continue
}
newState := serverState{
hasHeader: true,
canonical: resp.(RespHeader).Canonical,
finalized: resp.(RespHeader).Finalized,
}
s.cpSlot, s.parentHash = resp.(RespHeader).Header.Slot, resp.(RespHeader).Header.ParentRoot
if s.cpSlot%params.EpochLength == 0 {
s.hasEpochInfo, s.epochBoundary = true, true
}
if s.hasEpochInfo {
newState.state = ssPrintStatus
} else {
newState.state = ssNeedParent
}
s.serverState[sid.Server] = newState
case ssParentRequested:
s.parentSlot = resp.(RespHeader).Header.Slot
s.hasEpochInfo, s.epochBoundary = true, s.cpSlot/params.EpochLength > s.parentSlot/params.EpochLength
newState := s.serverState[sid.Server]
newState.state = ssPrintStatus
s.serverState[sid.Server] = newState
}
case request.EvUnregistered:
delete(s.serverState, event.Server)
}
}
// start a request if possible
for _, server := range requester.CanSendTo() {
switch s.serverState[server].state {
case ssDefault:
if s.locked == (request.ServerAndID{}) {
id := requester.Send(server, ReqCheckpointData(s.checkpointHash))
s.locked = request.ServerAndID{Server: server, ID: id}
}
case ssNeedHeader:
requester.Send(server, ReqHeader(s.checkpointHash))
newState := s.serverState[server]
newState.state = ssHeaderRequested
s.serverState[server] = newState
case ssNeedParent:
requester.Send(server, ReqHeader(s.parentHash))
newState := s.serverState[server]
newState.state = ssParentRequested
s.serverState[server] = newState
}
}
// print log message if necessary
for server, state := range s.serverState {
if state.state != ssPrintStatus {
continue
}
switch {
case !state.hasHeader:
log.Error("blsync: checkpoint block is not available, reported as unknown", "server", server.Name())
case !state.canonical:
log.Error("blsync: checkpoint block is not available, reported as non-canonical", "server", server.Name())
case !s.hasEpochInfo:
// should be available if hasHeader is true and state is ssPrintStatus
panic("checkpoint epoch info not available when printing retrieval status")
case !s.epochBoundary:
log.Error("blsync: checkpoint block is not first of epoch", "slot", s.cpSlot, "parent", s.parentSlot, "server", server.Name())
case !state.finalized:
log.Error("blsync: checkpoint block is reported as non-finalized", "server", server.Name())
default:
log.Error("blsync: checkpoint not available, but reported as finalized; specified checkpoint hash might be too old", "server", server.Name())
}
s.serverState[server] = serverState{state: ssDone}
}
}
// ForwardUpdateSync implements request.Module; it fetches updates between the
// committee chain head and each server's announced head. Updates are fetched
// in batches and multiple batches can also be requested in parallel.
// Out-of-order responses are also handled; if a batch of updates cannot be
// added to the chain immediately because of a gap, the future updates are
// remembered until they can be processed.
type ForwardUpdateSync struct {
chain committeeChain
rangeLock rangeLock
lockedIDs map[request.ServerAndID]struct{}
processQueue []updateResponse
nextSyncPeriod map[request.Server]uint64
}
// NewForwardUpdateSync creates a new ForwardUpdateSync.
func NewForwardUpdateSync(chain committeeChain) *ForwardUpdateSync {
return &ForwardUpdateSync{
chain: chain,
rangeLock: make(rangeLock),
lockedIDs: make(map[request.ServerAndID]struct{}),
nextSyncPeriod: make(map[request.Server]uint64),
}
}
// rangeLock allows locking sections of an integer space, preventing the syncing
// mechanism from requesting again sections where a request that has not timed out
// is still pending, or where already fetched but unprocessed data is available.
type rangeLock map[uint64]int
// lock locks or unlocks the given section, depending on the sign of the add parameter.
func (r rangeLock) lock(first, count uint64, add int) {
for i := first; i < first+count; i++ {
if v := r[i] + add; v > 0 {
r[i] = v
} else {
delete(r, i)
}
}
}
// firstUnlocked returns the first unlocked section starting at or after start
// and not longer than maxCount.
func (r rangeLock) firstUnlocked(start, maxCount uint64) (first, count uint64) {
first = start
for {
if _, ok := r[first]; !ok {
break
}
first++
}
for {
count++
if count == maxCount {
break
}
if _, ok := r[first+count]; ok {
break
}
}
return
}
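// Example (illustrative values, not part of the package): with periods 5 and 6
// locked, the first unlocked range starting at period 3 is truncated before the
// locked section and the next one starts right after it:
//
//	r := make(rangeLock)
//	r.lock(5, 2, 1)       // lock periods 5-6
//	r.firstUnlocked(3, 8) // returns first=3, count=2 (periods 3-4)
//	r.firstUnlocked(5, 8) // returns first=7, count=8 (periods 7-14)
//	r.lock(5, 2, -1)      // unlock periods 5-6 again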
// lockRange locks the range belonging to the given update request, unless the
// same request has already been locked.
func (s *ForwardUpdateSync) lockRange(sid request.ServerAndID, req ReqUpdates) {
if _, ok := s.lockedIDs[sid]; ok {
return
}
s.lockedIDs[sid] = struct{}{}
s.rangeLock.lock(req.FirstPeriod, req.Count, 1)
}
// unlockRange unlocks the range belonging to the given update request, unless
// the same request has already been unlocked.
func (s *ForwardUpdateSync) unlockRange(sid request.ServerAndID, req ReqUpdates) {
if _, ok := s.lockedIDs[sid]; !ok {
return
}
delete(s.lockedIDs, sid)
s.rangeLock.lock(req.FirstPeriod, req.Count, -1)
}
// verifyRange returns true if the number of updates and the individual update
// periods in the response match the requested section.
func (s *ForwardUpdateSync) verifyRange(request ReqUpdates, response RespUpdates) bool {
if uint64(len(response.Updates)) != request.Count || uint64(len(response.Committees)) != request.Count {
return false
}
for i, update := range response.Updates {
if update.AttestedHeader.Header.SyncPeriod() != request.FirstPeriod+uint64(i) {
return false
}
}
return true
}
// updateResponse is a response that has passed initial verification and has been
// queued for processing. Note that an update response cannot be processed until
// the previous updates have also been added to the chain.
type updateResponse struct {
sid request.ServerAndID
request ReqUpdates
response RespUpdates
}
// updateResponseList implements sort.Interface; it sorts queued update responses by FirstPeriod.
type updateResponseList []updateResponse
func (u updateResponseList) Len() int { return len(u) }
func (u updateResponseList) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
func (u updateResponseList) Less(i, j int) bool {
return u[i].request.FirstPeriod < u[j].request.FirstPeriod
}
// Process implements request.Module.
func (s *ForwardUpdateSync) Process(requester request.Requester, events []request.Event) {
for _, event := range events {
switch event.Type {
case request.EvResponse, request.EvFail, request.EvTimeout:
sid, rq, rs := event.RequestInfo()
req := rq.(ReqUpdates)
var queued bool
if event.Type == request.EvResponse {
resp := rs.(RespUpdates)
if s.verifyRange(req, resp) {
// there is a response with a valid format; put it in the process queue
s.processQueue = append(s.processQueue, updateResponse{sid: sid, request: req, response: resp})
s.lockRange(sid, req)
queued = true
} else {
requester.Fail(event.Server, "invalid update range")
}
}
if !queued {
s.unlockRange(sid, req)
}
case EvNewOptimisticUpdate:
update := event.Data.(types.OptimisticUpdate)
s.nextSyncPeriod[event.Server] = types.SyncPeriod(update.SignatureSlot + 256)
case request.EvUnregistered:
delete(s.nextSyncPeriod, event.Server)
}
}
// try processing ordered list of available responses
sort.Sort(updateResponseList(s.processQueue))
for s.processQueue != nil {
u := s.processQueue[0]
if !s.processResponse(requester, u) {
break
}
s.unlockRange(u.sid, u.request)
s.processQueue = s.processQueue[1:]
if len(s.processQueue) == 0 {
s.processQueue = nil
}
}
// start new requests if possible
startPeriod, chainInit := s.chain.NextSyncPeriod()
if !chainInit {
return
}
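// request the first unlocked range of up to maxUpdateRequest periods, sending
// each batch to the server that can serve the largest part of it according to
// its announced next sync period, until no suitable server remains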
for {
firstPeriod, maxCount := s.rangeLock.firstUnlocked(startPeriod, maxUpdateRequest)
var (
sendTo request.Server
bestCount uint64
)
for _, server := range requester.CanSendTo() {
nextPeriod := s.nextSyncPeriod[server]
if nextPeriod <= firstPeriod {
continue
}
count := maxCount
if nextPeriod < firstPeriod+maxCount {
count = nextPeriod - firstPeriod
}
if count > bestCount {
sendTo, bestCount = server, count
}
}
if sendTo == nil {
return
}
req := ReqUpdates{FirstPeriod: firstPeriod, Count: bestCount}
id := requester.Send(sendTo, req)
s.lockRange(request.ServerAndID{Server: sendTo, ID: id}, req)
}
}
// processResponse adds the fetched updates and committees to the committee chain.
// Returns true in case of full or partial success.
func (s *ForwardUpdateSync) processResponse(requester request.Requester, u updateResponse) (success bool) {
for i, update := range u.response.Updates {
if err := s.chain.InsertUpdate(update, u.response.Committees[i]); err != nil {
if err == light.ErrInvalidPeriod {
// there is a gap in the update periods; stop processing without
// failing and try again next time
return
}
if err == light.ErrInvalidUpdate || err == light.ErrWrongCommitteeRoot || err == light.ErrCannotReorg {
requester.Fail(u.sid.Server, "invalid update received")
} else {
log.Error("Unexpected InsertUpdate error", "error", err)
}
return
}
success = true
}
return
}

@ -0,0 +1,219 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package sync
import (
"testing"
"github.com/ethereum/go-ethereum/beacon/light/request"
"github.com/ethereum/go-ethereum/beacon/types"
)
func TestCheckpointInit(t *testing.T) {
chain := &TestCommitteeChain{}
checkpoint := &types.BootstrapData{Header: types.Header{Slot: 0x2000*4 + 0x1000}} // period 4
checkpointHash := checkpoint.Header.Hash()
chkInit := NewCheckpointInit(chain, checkpointHash)
ts := NewTestScheduler(t, chkInit)
// add 2 servers
ts.AddServer(testServer1, 1)
ts.AddServer(testServer2, 1)
// expect bootstrap request to server 1
ts.Run(1, testServer1, ReqCheckpointData(checkpointHash))
// server 1 times out; expect request to server 2
ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
ts.Run(2, testServer2, ReqCheckpointData(checkpointHash))
// invalid response from server 2; expect init state to still be false
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), &types.BootstrapData{Header: types.Header{Slot: 123456}})
ts.ExpFail(testServer2)
ts.Run(3)
chain.ExpInit(t, false)
// server 1 fails (hard timeout)
ts.RequestEvent(request.EvFail, ts.Request(1, 1), nil)
ts.Run(4)
chain.ExpInit(t, false)
// server 3 is registered; expect bootstrap request to server 3
ts.AddServer(testServer3, 1)
ts.Run(5, testServer3, ReqCheckpointData(checkpointHash))
// valid response from server 3; expect chain to be initialized
ts.RequestEvent(request.EvResponse, ts.Request(5, 1), checkpoint)
ts.Run(6)
chain.ExpInit(t, true)
}
func TestUpdateSyncParallel(t *testing.T) {
chain := &TestCommitteeChain{}
chain.SetNextSyncPeriod(0)
updateSync := NewForwardUpdateSync(chain)
ts := NewTestScheduler(t, updateSync)
// add 2 servers, head at period 100; allow 3-3 parallel requests for each
ts.AddServer(testServer1, 3)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, types.OptimisticUpdate{SignatureSlot: 0x2000*100 + 0x1000})
ts.AddServer(testServer2, 3)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, types.OptimisticUpdate{SignatureSlot: 0x2000*100 + 0x1000})
// expect 6 requests to be sent
ts.Run(1,
testServer1, ReqUpdates{FirstPeriod: 0, Count: 8},
testServer1, ReqUpdates{FirstPeriod: 8, Count: 8},
testServer1, ReqUpdates{FirstPeriod: 16, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 24, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 32, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 40, Count: 8})
// valid response to request 1; expect 8 periods synced and a new request started
ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(ts.Request(1, 1)))
ts.AddAllowance(testServer1, 1)
ts.Run(2, testServer1, ReqUpdates{FirstPeriod: 48, Count: 8})
chain.ExpNextSyncPeriod(t, 8)
// valid response to requests 4 and 5
ts.RequestEvent(request.EvResponse, ts.Request(1, 4), testRespUpdate(ts.Request(1, 4)))
ts.RequestEvent(request.EvResponse, ts.Request(1, 5), testRespUpdate(ts.Request(1, 5)))
ts.AddAllowance(testServer2, 2)
// expect 2 more requests but no sync progress (responses 4 and 5 cannot be added before 2 and 3)
ts.Run(3,
testServer2, ReqUpdates{FirstPeriod: 56, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 64, Count: 8})
chain.ExpNextSyncPeriod(t, 8)
// soft timeout for requests 2 and 3 (server 1 is overloaded)
ts.RequestEvent(request.EvTimeout, ts.Request(1, 2), nil)
ts.RequestEvent(request.EvTimeout, ts.Request(1, 3), nil)
// no allowance, no more requests
ts.Run(4)
// valid response to requests 6 and 8 and 9
ts.RequestEvent(request.EvResponse, ts.Request(1, 6), testRespUpdate(ts.Request(1, 6)))
ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(3, 2), testRespUpdate(ts.Request(3, 2)))
ts.AddAllowance(testServer2, 3)
// server 2 can now resend requests 2 and 3 (timed out by server 1) and also send a new one
ts.Run(5,
testServer2, ReqUpdates{FirstPeriod: 8, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 16, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 72, Count: 8})
// server 1 finally answers timed out request 2
ts.RequestEvent(request.EvResponse, ts.Request(1, 2), testRespUpdate(ts.Request(1, 2)))
ts.AddAllowance(testServer1, 1)
// expect sync progress and one new request
ts.Run(6, testServer1, ReqUpdates{FirstPeriod: 80, Count: 8})
chain.ExpNextSyncPeriod(t, 16)
// server 2 answers requests 11 and 12 (resends of requests 2 and 3)
ts.RequestEvent(request.EvResponse, ts.Request(5, 1), testRespUpdate(ts.Request(5, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(5, 2), testRespUpdate(ts.Request(5, 2)))
ts.AddAllowance(testServer2, 2)
ts.Run(7,
testServer2, ReqUpdates{FirstPeriod: 88, Count: 8},
testServer2, ReqUpdates{FirstPeriod: 96, Count: 4})
// finally the gap is filled, update can process responses up to req6
chain.ExpNextSyncPeriod(t, 48)
// all remaining requests are answered
ts.RequestEvent(request.EvResponse, ts.Request(1, 3), testRespUpdate(ts.Request(1, 3)))
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(5, 3), testRespUpdate(ts.Request(5, 3)))
ts.RequestEvent(request.EvResponse, ts.Request(6, 1), testRespUpdate(ts.Request(6, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
ts.RequestEvent(request.EvResponse, ts.Request(7, 2), testRespUpdate(ts.Request(7, 2)))
ts.Run(8)
// expect chain to be fully synced
chain.ExpNextSyncPeriod(t, 100)
}
func TestUpdateSyncDifferentHeads(t *testing.T) {
chain := &TestCommitteeChain{}
chain.SetNextSyncPeriod(10)
updateSync := NewForwardUpdateSync(chain)
ts := NewTestScheduler(t, updateSync)
// add 3 servers with different announced head periods
ts.AddServer(testServer1, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer1, types.OptimisticUpdate{SignatureSlot: 0x2000*15 + 0x1000})
ts.AddServer(testServer2, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer2, types.OptimisticUpdate{SignatureSlot: 0x2000*16 + 0x1000})
ts.AddServer(testServer3, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer3, types.OptimisticUpdate{SignatureSlot: 0x2000*17 + 0x1000})
// expect request to the best announced head
ts.Run(1, testServer3, ReqUpdates{FirstPeriod: 10, Count: 7})
// request times out, expect request to the next best head
ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
ts.Run(2, testServer2, ReqUpdates{FirstPeriod: 10, Count: 6})
// request times out, expect request to the last available server
ts.RequestEvent(request.EvTimeout, ts.Request(2, 1), nil)
ts.Run(3, testServer1, ReqUpdates{FirstPeriod: 10, Count: 5})
// valid response to request 3, expect chain synced to period 15
ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
ts.AddAllowance(testServer1, 1)
ts.Run(4)
chain.ExpNextSyncPeriod(t, 15)
// invalid response to request 1, server can only deliver updates up to period 15 despite announced head
truncated := ts.Request(1, 1)
truncated.request = ReqUpdates{FirstPeriod: 10, Count: 5}
ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(truncated))
ts.ExpFail(testServer3)
ts.Run(5)
// expect no progress of chain head
chain.ExpNextSyncPeriod(t, 15)
// valid response to request 2, expect chain synced to period 16
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
ts.AddAllowance(testServer2, 1)
ts.Run(6)
chain.ExpNextSyncPeriod(t, 16)
// a new server is registered with announced head period 17
ts.AddServer(testServer4, 1)
ts.ServerEvent(EvNewOptimisticUpdate, testServer4, types.OptimisticUpdate{SignatureSlot: 0x2000*17 + 0x1000})
// expect request to sync one more period
ts.Run(7, testServer4, ReqUpdates{FirstPeriod: 16, Count: 1})
// valid response, expect chain synced to period 17
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
ts.AddAllowance(testServer4, 1)
ts.Run(8)
chain.ExpNextSyncPeriod(t, 17)
}
func testRespUpdate(reqWithID requestWithID) request.Response {
var resp RespUpdates
if reqWithID.request == nil {
return resp
}
req := reqWithID.request.(ReqUpdates)
resp.Updates = make([]*types.LightClientUpdate, int(req.Count))
resp.Committees = make([]*types.SerializedSyncCommittee, int(req.Count))
period := req.FirstPeriod
for i := range resp.Updates {
resp.Updates[i] = &types.LightClientUpdate{AttestedHeader: types.SignedHeader{Header: types.Header{Slot: 0x2000*period + 0x1000}}}
resp.Committees[i] = new(types.SerializedSyncCommittee)
period++
}
return resp
}

@ -41,4 +41,6 @@ const (
StateIndexNextSyncCommittee = 55 StateIndexNextSyncCommittee = 55
StateIndexExecPayload = 56 StateIndexExecPayload = 56
StateIndexExecHead = 908 StateIndexExecHead = 908
BodyIndexExecPayload = 25
) )

@ -0,0 +1,110 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
"github.com/protolambda/zrnt/eth2/configs"
"github.com/protolambda/ztyp/tree"
)
type blockObject interface {
HashTreeRoot(spec *zrntcommon.Spec, hFn tree.HashFn) zrntcommon.Root
Header(spec *zrntcommon.Spec) *zrntcommon.BeaconBlockHeader
}
// BeaconBlock represents a full block in the beacon chain.
type BeaconBlock struct {
blockObj blockObject
}
// BlockFromJSON decodes a beacon block from JSON.
func BlockFromJSON(forkName string, data []byte) (*BeaconBlock, error) {
var obj blockObject
switch forkName {
case "deneb":
obj = new(deneb.BeaconBlock)
case "capella":
obj = new(capella.BeaconBlock)
default:
return nil, fmt.Errorf("unsupported fork: %s", forkName)
}
if err := json.Unmarshal(data, obj); err != nil {
return nil, err
}
return &BeaconBlock{obj}, nil
}
// NewBeaconBlock wraps a ZRNT block.
func NewBeaconBlock(obj blockObject) *BeaconBlock {
switch obj := obj.(type) {
case *capella.BeaconBlock:
return &BeaconBlock{obj}
case *deneb.BeaconBlock:
return &BeaconBlock{obj}
default:
panic(fmt.Errorf("unsupported block type %T", obj))
}
}
// Slot returns the slot number of the block.
func (b *BeaconBlock) Slot() uint64 {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return uint64(obj.Slot)
case *deneb.BeaconBlock:
return uint64(obj.Slot)
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// ExecutionPayload parses and returns the execution payload of the block.
func (b *BeaconBlock) ExecutionPayload() (*types.Block, error) {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return convertPayload(&obj.Body.ExecutionPayload, &obj.ParentRoot)
case *deneb.BeaconBlock:
return convertPayload(&obj.Body.ExecutionPayload, &obj.ParentRoot)
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// Header returns the block's header data.
func (b *BeaconBlock) Header() Header {
switch obj := b.blockObj.(type) {
case *capella.BeaconBlock:
return headerFromZRNT(obj.Header(configs.Mainnet))
case *deneb.BeaconBlock:
return headerFromZRNT(obj.Header(configs.Mainnet))
default:
panic(fmt.Errorf("unsupported block type %T", b.blockObj))
}
}
// Root computes the SSZ root hash of the block.
func (b *BeaconBlock) Root() common.Hash {
return common.Hash(b.blockObj.HashTreeRoot(configs.Mainnet, tree.GetHashFn()))
}
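
A minimal usage sketch of the API above (assumptions: the JSON file contains a bare block message of the named fork, as in the testdata files below; the file path and the hard-coded fork name are illustrative, the fork name normally comes from the "version" field of the beacon API response envelope).

package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/beacon/types"
)

func main() {
	data, err := os.ReadFile("testdata/block_deneb.json") // illustrative path
	if err != nil {
		panic(err)
	}
	block, err := types.BlockFromJSON("deneb", data)
	if err != nil {
		panic(err)
	}
	execBlock, err := block.ExecutionPayload()
	if err != nil {
		panic(err)
	}
	fmt.Println("slot:", block.Slot(), "exec block:", execBlock.NumberU64(), "root:", block.Root())
}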

@ -0,0 +1,77 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"os"
"path/filepath"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestBlockFromJSON(t *testing.T) {
type blocktest struct {
file string
version string
wantSlot uint64
wantBlockNumber uint64
wantBlockHash common.Hash
}
tests := []blocktest{
{
file: "block_deneb.json",
version: "deneb",
wantSlot: 8631513,
wantBlockNumber: 19431837,
wantBlockHash: common.HexToHash("0x4cf7d9108fc01b50023ab7cab9b372a96068fddcadec551630393b65acb1f34c"),
},
{
file: "block_capella.json",
version: "capella",
wantSlot: 7378495,
wantBlockNumber: 18189758,
wantBlockHash: common.HexToHash("0x802acf5c350f4252e31d83c431fcb259470250fa0edf49e8391cfee014239820"),
},
}
for _, test := range tests {
t.Run(test.file, func(t *testing.T) {
data, err := os.ReadFile(filepath.Join("testdata", test.file))
if err != nil {
t.Fatal(err)
}
beaconBlock, err := BlockFromJSON(test.version, data)
if err != nil {
t.Fatal(err)
}
if beaconBlock.Slot() != test.wantSlot {
t.Errorf("wrong slot number %d", beaconBlock.Slot())
}
execBlock, err := beaconBlock.ExecutionPayload()
if err != nil {
t.Fatalf("payload extraction failed: %v", err)
}
if execBlock.NumberU64() != test.wantBlockNumber {
t.Errorf("wrong block number: %v", execBlock.NumberU64())
}
if execBlock.Hash() != test.wantBlockHash {
t.Errorf("wrong block hash: %v", execBlock.Hash())
}
})
}
}

@ -19,7 +19,9 @@ package types
import ( import (
"crypto/sha256" "crypto/sha256"
"fmt" "fmt"
"math"
"os" "os"
"slices"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -27,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle" "github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
@ -34,10 +37,12 @@ import (
// across signing different data structures. // across signing different data structures.
const syncCommitteeDomain = 7 const syncCommitteeDomain = 7
var knownForks = []string{"GENESIS", "ALTAIR", "BELLATRIX", "CAPELLA", "DENEB"}
// Fork describes a single beacon chain fork and also stores the calculated // Fork describes a single beacon chain fork and also stores the calculated
// signature domain used after this fork. // signature domain used after this fork.
type Fork struct { type Fork struct {
// Name of the fork in the chain config (config.yaml) file{ // Name of the fork in the chain config (config.yaml) file
Name string Name string
// Epoch when given fork version is activated // Epoch when given fork version is activated
@ -46,6 +51,9 @@ type Fork struct {
// Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types // Fork version, see https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#custom-types
Version []byte Version []byte
// index in list of known forks or MaxInt if unknown
knownIndex int
// calculated by computeDomain, based on fork version and genesis validators root // calculated by computeDomain, based on fork version and genesis validators root
domain merkle.Value domain merkle.Value
} }
@ -101,7 +109,12 @@ func (f Forks) SigningRoot(header Header) (common.Hash, error) {
func (f Forks) Len() int { return len(f) } func (f Forks) Len() int { return len(f) }
func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] } func (f Forks) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
func (f Forks) Less(i, j int) bool { return f[i].Epoch < f[j].Epoch } func (f Forks) Less(i, j int) bool {
if f[i].Epoch != f[j].Epoch {
return f[i].Epoch < f[j].Epoch
}
return f[i].knownIndex < f[j].knownIndex
}
// ChainConfig contains the beacon chain configuration. // ChainConfig contains the beacon chain configuration.
type ChainConfig struct { type ChainConfig struct {
@ -110,18 +123,34 @@ type ChainConfig struct {
Forks Forks Forks Forks
} }
// ForkAtEpoch returns the latest active fork at the given epoch.
func (c *ChainConfig) ForkAtEpoch(epoch uint64) Fork {
for i := len(c.Forks) - 1; i >= 0; i-- {
if c.Forks[i].Epoch <= epoch {
return *c.Forks[i]
}
}
return Fork{}
}
// AddFork adds a new item to the list of forks. // AddFork adds a new item to the list of forks.
func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainConfig { func (c *ChainConfig) AddFork(name string, epoch uint64, version []byte) *ChainConfig {
knownIndex := slices.Index(knownForks, name)
if knownIndex == -1 {
knownIndex = math.MaxInt // assume that the unknown fork happens after the known ones
if epoch != math.MaxUint64 {
log.Warn("Unknown fork in config.yaml", "fork name", name, "known forks", knownForks)
}
}
fork := &Fork{ fork := &Fork{
Name: name, Name: name,
Epoch: epoch, Epoch: epoch,
Version: version, Version: version,
knownIndex: knownIndex,
} }
fork.computeDomain(c.GenesisValidatorsRoot) fork.computeDomain(c.GenesisValidatorsRoot)
c.Forks = append(c.Forks, fork) c.Forks = append(c.Forks, fork)
sort.Sort(c.Forks) sort.Sort(c.Forks)
return c return c
} }
@ -171,6 +200,5 @@ func (c *ChainConfig) LoadForks(path string) error {
for name := range versions { for name := range versions {
return fmt.Errorf("epoch number missing for fork %q in beacon chain config file", name) return fmt.Errorf("epoch number missing for fork %q in beacon chain config file", name)
} }
sort.Sort(c.Forks)
return nil return nil
} }
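
For context, a minimal sketch (not part of the diff) of how the new `AddFork`/`ForkAtEpoch` helpers compose; the epochs and version bytes below are illustrative placeholders, not canonical network values:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/types"
)

func main() {
	// AddFork keeps the list sorted by epoch, with knownIndex breaking ties
	// so that zero-epoch forks stay in their canonical order.
	cfg := (&types.ChainConfig{}).
		AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
		AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
		AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0})

	// ForkAtEpoch returns the latest fork activated at or before the epoch.
	fmt.Println(cfg.ForkAtEpoch(100_000).Name) // ALTAIR
}
```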

@ -0,0 +1,80 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"encoding/json"
"fmt"
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/common"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
"github.com/protolambda/ztyp/tree"
)
type headerObject interface {
HashTreeRoot(hFn tree.HashFn) zrntcommon.Root
}
type ExecutionHeader struct {
obj headerObject
}
// ExecutionHeaderFromJSON decodes an execution header from JSON data provided by
// the beacon chain API.
func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, error) {
var obj headerObject
switch forkName {
case "capella":
obj = new(capella.ExecutionPayloadHeader)
case "deneb":
obj = new(deneb.ExecutionPayloadHeader)
default:
return nil, fmt.Errorf("unsupported fork: " + forkName)
}
if err := json.Unmarshal(data, obj); err != nil {
return nil, err
}
return &ExecutionHeader{obj: obj}, nil
}
func NewExecutionHeader(obj headerObject) *ExecutionHeader {
switch obj.(type) {
case *capella.ExecutionPayloadHeader:
case *deneb.ExecutionPayloadHeader:
default:
panic(fmt.Errorf("unsupported ExecutionPayloadHeader type %T", obj))
}
return &ExecutionHeader{obj: obj}
}
func (eh *ExecutionHeader) PayloadRoot() merkle.Value {
return merkle.Value(eh.obj.HashTreeRoot(tree.GetHashFn()))
}
func (eh *ExecutionHeader) BlockHash() common.Hash {
switch obj := eh.obj.(type) {
case *capella.ExecutionPayloadHeader:
return common.Hash(obj.BlockHash)
case *deneb.ExecutionPayloadHeader:
return common.Hash(obj.BlockHash)
default:
panic(fmt.Errorf("unsupported ExecutionPayloadHeader type %T", obj))
}
}
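
As a usage note (assumption, not taken from the diff): a decoded header can also be wrapped directly via `NewExecutionHeader`; `ExecutionHeaderFromJSON` is the path used for beacon API responses. A zero-valued capella header is used below purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/protolambda/zrnt/eth2/beacon/capella"
)

func main() {
	// Real headers are decoded from API data with
	// types.ExecutionHeaderFromJSON("capella", data); an empty header is
	// enough to show the two accessors.
	eh := types.NewExecutionHeader(new(capella.ExecutionPayloadHeader))

	fmt.Printf("block hash:   %x\n", eh.BlockHash())   // execution block hash field
	fmt.Printf("payload root: %x\n", eh.PayloadRoot()) // SSZ root used in Merkle proofs
}
```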

@ -0,0 +1,141 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
"github.com/holiman/uint256"
"github.com/protolambda/zrnt/eth2/beacon/capella"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
"github.com/protolambda/zrnt/eth2/beacon/deneb"
)
type payloadType interface {
*capella.ExecutionPayload | *deneb.ExecutionPayload
}
// convertPayload converts a beacon chain execution payload to types.Block.
func convertPayload[T payloadType](payload T, parentRoot *zrntcommon.Root) (*types.Block, error) {
var (
header types.Header
transactions []*types.Transaction
withdrawals []*types.Withdrawal
expectedHash [32]byte
err error
)
switch p := any(payload).(type) {
case *capella.ExecutionPayload:
convertCapellaHeader(p, &header)
transactions, err = convertTransactions(p.Transactions, &header)
if err != nil {
return nil, err
}
withdrawals = convertWithdrawals(p.Withdrawals, &header)
expectedHash = p.BlockHash
case *deneb.ExecutionPayload:
convertDenebHeader(p, common.Hash(*parentRoot), &header)
transactions, err = convertTransactions(p.Transactions, &header)
if err != nil {
return nil, err
}
withdrawals = convertWithdrawals(p.Withdrawals, &header)
expectedHash = p.BlockHash
default:
panic("unsupported block type")
}
block := types.NewBlockWithHeader(&header).WithBody(types.Body{Transactions: transactions, Withdrawals: withdrawals})
if hash := block.Hash(); hash != expectedHash {
return nil, fmt.Errorf("sanity check failed, payload hash does not match (expected %x, got %x)", expectedHash, hash)
}
return block, nil
}
func convertCapellaHeader(payload *capella.ExecutionPayload, h *types.Header) {
// note: h.TxHash is set in convertTransactions
h.ParentHash = common.Hash(payload.ParentHash)
h.UncleHash = types.EmptyUncleHash
h.Coinbase = common.Address(payload.FeeRecipient)
h.Root = common.Hash(payload.StateRoot)
h.ReceiptHash = common.Hash(payload.ReceiptsRoot)
h.Bloom = types.Bloom(payload.LogsBloom)
h.Difficulty = common.Big0
h.Number = new(big.Int).SetUint64(uint64(payload.BlockNumber))
h.GasLimit = uint64(payload.GasLimit)
h.GasUsed = uint64(payload.GasUsed)
h.Time = uint64(payload.Timestamp)
h.Extra = []byte(payload.ExtraData)
h.MixDigest = common.Hash(payload.PrevRandao)
h.Nonce = types.BlockNonce{}
h.BaseFee = (*uint256.Int)(&payload.BaseFeePerGas).ToBig()
}
func convertDenebHeader(payload *deneb.ExecutionPayload, parentRoot common.Hash, h *types.Header) {
// note: h.TxHash is set in convertTransactions
h.ParentHash = common.Hash(payload.ParentHash)
h.UncleHash = types.EmptyUncleHash
h.Coinbase = common.Address(payload.FeeRecipient)
h.Root = common.Hash(payload.StateRoot)
h.ReceiptHash = common.Hash(payload.ReceiptsRoot)
h.Bloom = types.Bloom(payload.LogsBloom)
h.Difficulty = common.Big0
h.Number = new(big.Int).SetUint64(uint64(payload.BlockNumber))
h.GasLimit = uint64(payload.GasLimit)
h.GasUsed = uint64(payload.GasUsed)
h.Time = uint64(payload.Timestamp)
h.Extra = []byte(payload.ExtraData)
h.MixDigest = common.Hash(payload.PrevRandao)
h.Nonce = types.BlockNonce{}
h.BaseFee = (*uint256.Int)(&payload.BaseFeePerGas).ToBig()
// new in deneb
h.BlobGasUsed = (*uint64)(&payload.BlobGasUsed)
h.ExcessBlobGas = (*uint64)(&payload.ExcessBlobGas)
h.ParentBeaconRoot = &parentRoot
}
func convertTransactions(list zrntcommon.PayloadTransactions, execHeader *types.Header) ([]*types.Transaction, error) {
txs := make([]*types.Transaction, len(list))
for i, opaqueTx := range list {
var tx types.Transaction
if err := tx.UnmarshalBinary(opaqueTx); err != nil {
return nil, fmt.Errorf("failed to parse tx %d: %v", i, err)
}
txs[i] = &tx
}
execHeader.TxHash = types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil))
return txs, nil
}
func convertWithdrawals(list zrntcommon.Withdrawals, execHeader *types.Header) []*types.Withdrawal {
withdrawals := make([]*types.Withdrawal, len(list))
for i, w := range list {
withdrawals[i] = &types.Withdrawal{
Index: uint64(w.Index),
Validator: uint64(w.ValidatorIndex),
Address: common.Address(w.Address),
Amount: uint64(w.Amount),
}
}
wroot := types.DeriveSha(types.Withdrawals(withdrawals), trie.NewStackTrie(nil))
execHeader.WithdrawalsHash = &wroot
return withdrawals
}
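
One detail worth calling out: the converters recompute the transaction and withdrawal commitment roots exactly as the execution layer does (DeriveSha over a StackTrie), which is what makes the final block-hash sanity check in convertPayload meaningful. A small standalone check of that convention, using the well-known constants from core/types:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	// For an empty withdrawal list the derived root must equal the
	// well-known empty-withdrawals root; otherwise the block hash computed
	// in convertPayload could never match the payload's BlockHash.
	root := types.DeriveSha(types.Withdrawals{}, trie.NewStackTrie(nil))
	fmt.Println(root == types.EmptyWithdrawalsHash) // true
}
```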

@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle" "github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params" "github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
zrntcommon "github.com/protolambda/zrnt/eth2/beacon/common"
) )
//go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go //go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go
@ -57,6 +58,16 @@ type Header struct {
BodyRoot common.Hash `gencodec:"required" json:"body_root"` BodyRoot common.Hash `gencodec:"required" json:"body_root"`
} }
func headerFromZRNT(zh *zrntcommon.BeaconBlockHeader) Header {
return Header{
Slot: uint64(zh.Slot),
ProposerIndex: uint64(zh.ProposerIndex),
ParentRoot: common.Hash(zh.ParentRoot),
StateRoot: common.Hash(zh.StateRoot),
BodyRoot: common.Hash(zh.BodyRoot),
}
}
// headerMarshaling is a field type overrides for gencodec. // headerMarshaling is a field type overrides for gencodec.
type headerMarshaling struct { type headerMarshaling struct {
Slot common.Decimal Slot common.Decimal

@ -23,8 +23,15 @@ import (
"github.com/ethereum/go-ethereum/beacon/merkle" "github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params" "github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
ctypes "github.com/ethereum/go-ethereum/core/types"
) )
// HeadInfo represents an unvalidated new head announcement.
type HeadInfo struct {
Slot uint64
BlockRoot common.Hash
}
// BootstrapData contains a sync committee where light sync can be started, // BootstrapData contains a sync committee where light sync can be started,
// together with a proof through a beacon header and corresponding state. // together with a proof through a beacon header and corresponding state.
// Note: BootstrapData is fetched from a server based on a known checkpoint hash. // Note: BootstrapData is fetched from a server based on a known checkpoint hash.
@ -134,3 +141,96 @@ func (u UpdateScore) BetterThan(w UpdateScore) bool {
} }
return u.SignerCount > w.SignerCount return u.SignerCount > w.SignerCount
} }
// HeaderWithExecProof contains a beacon header and proves the belonging execution
// payload header with a Merkle proof.
type HeaderWithExecProof struct {
Header
PayloadHeader *ExecutionHeader
PayloadBranch merkle.Values
}
// Validate verifies the Merkle proof of the execution payload header.
func (h *HeaderWithExecProof) Validate() error {
return merkle.VerifyProof(h.BodyRoot, params.BodyIndexExecPayload, h.PayloadBranch, h.PayloadHeader.PayloadRoot())
}
// OptimisticUpdate proves sync committee commitment on the attested beacon header.
// It also proves the belonging execution payload header with a Merkle proof.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
type OptimisticUpdate struct {
Attested HeaderWithExecProof
// Sync committee BLS signature aggregate
Signature SyncAggregate
// Slot in which the signature has been created (newer than Header.Slot,
// determines the signing sync committee)
SignatureSlot uint64
}
// SignedHeader returns the signed attested header of the update.
func (u *OptimisticUpdate) SignedHeader() SignedHeader {
return SignedHeader{
Header: u.Attested.Header,
Signature: u.Signature,
SignatureSlot: u.SignatureSlot,
}
}
// Validate verifies the Merkle proof proving the execution payload header.
// Note that the sync committee signature of the attested header should be
// verified separately by a synced committee chain.
func (u *OptimisticUpdate) Validate() error {
return u.Attested.Validate()
}
// FinalityUpdate proves a finalized beacon header by a sync committee commitment
// on an attested beacon header, referring to the latest finalized header with a
// Merkle proof.
// It also proves the execution payload header belonging to both the attested and
// the finalized beacon header with Merkle proofs.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
type FinalityUpdate struct {
Attested, Finalized HeaderWithExecProof
FinalityBranch merkle.Values
// Sync committee BLS signature aggregate
Signature SyncAggregate
// Slot in which the signature has been created (newer than Header.Slot,
// determines the signing sync committee)
SignatureSlot uint64
}
// SignedHeader returns the signed attested header of the update.
func (u *FinalityUpdate) SignedHeader() SignedHeader {
return SignedHeader{
Header: u.Attested.Header,
Signature: u.Signature,
SignatureSlot: u.SignatureSlot,
}
}
// Validate verifies the Merkle proofs proving the finalized beacon header and
// the execution payload headers belonging to the attested and finalized headers.
// Note that the sync committee signature of the attested header should be
// verified separately by a synced committee chain.
func (u *FinalityUpdate) Validate() error {
if err := u.Attested.Validate(); err != nil {
return err
}
if err := u.Finalized.Validate(); err != nil {
return err
}
return merkle.VerifyProof(u.Attested.StateRoot, params.StateIndexFinalBlock, u.FinalityBranch, merkle.Value(u.Finalized.Hash()))
}
// ChainHeadEvent returns an authenticated execution payload associated with the
// latest accepted head of the beacon chain, along with the hash of the latest
// finalized execution block.
type ChainHeadEvent struct {
BeaconHead Header
Block *ctypes.Block
Finalized common.Hash
}
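
A sketch (an assumption about intended usage, mirroring the comments above) of the call order a consumer would follow: Merkle-proof validation is local and cheap, while the sync committee signature must be checked against a synced committee chain, represented here by a plain callback:

```go
package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/beacon/types"
)

// processFinalityUpdate shows the intended order of checks; verifySig stands
// in for whatever committee chain performs the BLS aggregate verification.
func processFinalityUpdate(u *types.FinalityUpdate, verifySig func(types.SignedHeader) bool) error {
	if err := u.Validate(); err != nil { // Merkle proofs: attested, finalized, finality branch
		return err
	}
	if !verifySig(u.SignedHeader()) { // signature checked separately, as documented above
		return errors.New("invalid sync committee signature")
	}
	return nil
}

func main() {
	// Exercising this needs a real update fetched from a beacon API server;
	// the zero value carries no payload proof and would not pass Validate.
	_ = processFinalityUpdate
}
```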

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

@ -1,65 +1,101 @@
# This file contains sha256 checksums of optional build dependencies. # This file contains sha256 checksums of optional build dependencies.
# version:spec-tests 1.0.6 # version:spec-tests 2.1.0
# https://github.com/ethereum/execution-spec-tests/releases # https://github.com/ethereum/execution-spec-tests/releases
# https://github.com/ethereum/execution-spec-tests/releases/download/v1.0.6/ # https://github.com/ethereum/execution-spec-tests/releases/download/v2.1.0/
485af7b66cf41eb3a8c1bd46632913b8eb95995df867cf665617bbc9b4beedd1 fixtures_develop.tar.gz ca89c76851b0900bfcc3cbb9a26cbece1f3d7c64a3bed38723e914713290df6c fixtures_develop.tar.gz
# version:golang 1.21.5 # version:golang 1.22.4
# https://go.dev/dl/ # https://go.dev/dl/
285cbbdf4b6e6e62ed58f370f3f6d8c30825d6e56c5853c66d3c23bcdb09db19 go1.21.5.src.tar.gz fed720678e728a7ca30ba8d1ded1caafe27d16028fab0232b8ba8e22008fb784 go1.22.4.src.tar.gz
a2e1d5743e896e5fe1e7d96479c0a769254aed18cf216cf8f4c3a2300a9b3923 go1.21.5.darwin-amd64.tar.gz b9647fa9fc83a0cc5d4f092a19eaeaecf45f063a5aa7d4962fde65aeb7ae6ce1 go1.22.4.aix-ppc64.tar.gz
d0f8ac0c4fb3efc223a833010901d02954e3923cfe2c9a2ff0e4254a777cc9cc go1.21.5.darwin-arm64.tar.gz 7788f40f3a46f201df1dc46ca640403eb535d5513fc33449164a90dbd229b761 go1.22.4.darwin-amd64.pkg
2c05bbe0dc62456b90b7ddd354a54f373b7c377a98f8b22f52ab694b4f6cca58 go1.21.5.freebsd-386.tar.gz c95967f50aa4ace34af0c236cbdb49a9a3e80ee2ad09d85775cb4462a5c19ed3 go1.22.4.darwin-amd64.tar.gz
30b6c64e9a77129605bc12f836422bf09eec577a8c899ee46130aeff81567003 go1.21.5.freebsd-amd64.tar.gz 4036c88faf57a6b096916f1827edcdbf5290a47cc5f59956e88cdd9b1b71088c go1.22.4.darwin-arm64.pkg
8f4dba9cf5c61757bbd7e9ebdb93b6a30a1b03f4a636a1ba0cc2f27b907ab8e1 go1.21.5.linux-386.tar.gz 242b78dc4c8f3d5435d28a0d2cec9b4c1aa999b601fb8aa59fb4e5a1364bf827 go1.22.4.darwin-arm64.tar.gz
e2bc0b3e4b64111ec117295c088bde5f00eeed1567999ff77bc859d7df70078e go1.21.5.linux-amd64.tar.gz f2fbb51af4719d3616efb482d6ed2b96579b474156f85a7ddc6f126764feec4b go1.22.4.dragonfly-amd64.tar.gz
841cced7ecda9b2014f139f5bab5ae31785f35399f236b8b3e75dff2a2978d96 go1.21.5.linux-arm64.tar.gz 7c54884bb9f274884651d41e61d1bc12738863ad1497e97ea19ad0e9aa6bf7b5 go1.22.4.freebsd-386.tar.gz
837f4bf4e22fcdf920ffeaa4abf3d02d1314e03725431065f4d44c46a01b42fe go1.21.5.linux-armv6l.tar.gz 88d44500e1701dd35797619774d6dd51bf60f45a8338b0a82ddc018e4e63fb78 go1.22.4.freebsd-amd64.tar.gz
907b8c6ec4be9b184952e5d3493be66b1746442394a8bc78556c56834cd7c38b go1.21.5.linux-ppc64le.tar.gz 3d9efe47db142a22679aba46b1772e3900b0d87ae13bd2b3bc80dbf2ac0b2cd6 go1.22.4.freebsd-arm.tar.gz
9c4a81b72ebe44368813cd03684e1080a818bf915d84163abae2ed325a1b2dc0 go1.21.5.linux-s390x.tar.gz 726dc093cf020277be45debf03c3b02b43c2efb3e2a5d4fba8f52579d65327dc go1.22.4.freebsd-arm64.tar.gz
6da2418889dfb37763d0eb149c4a8d728c029e12f0cd54fbca0a31ae547e2d34 go1.21.5.windows-386.zip 5f6b67e5e32f1d6ccb2d4dcb44934a5e2e870a877ba7443d86ec43cfc28afa71 go1.22.4.freebsd-riscv64.tar.gz
bbe603cde7c9dee658f45164b4d06de1eff6e6e6b800100824e7c00d56a9a92f go1.21.5.windows-amd64.zip d56ecc2f85b6418a21ef83879594d0c42ab4f65391a676bb12254870e6690d63 go1.22.4.illumos-amd64.tar.gz
9b7acca50e674294e43202df4fbc26d5af4d8bc3170a3342a1514f09a2dab5e9 go1.21.5.windows-arm64.zip 47a2a8d249a91eb8605c33bceec63aedda0441a43eac47b4721e3975ff916cec go1.22.4.linux-386.tar.gz
ba79d4526102575196273416239cca418a651e049c2b099f3159db85e7bade7d go1.22.4.linux-amd64.tar.gz
a8e177c354d2e4a1b61020aca3562e27ea3e8f8247eca3170e3fa1e0c2f9e771 go1.22.4.linux-arm64.tar.gz
e2b143fbacbc9cbd448e9ef41ac3981f0488ce849af1cf37e2341d09670661de go1.22.4.linux-armv6l.tar.gz
e2ff9436e4b34bf6926b06d97916e26d67a909a2effec17967245900f0816f1d go1.22.4.linux-loong64.tar.gz
73f0dcc60458c4770593b05a7bc01cc0d31fc98f948c0c2334812c7a1f2fc3f1 go1.22.4.linux-mips.tar.gz
417af97fc2630a647052375768be4c38adcc5af946352ea5b28613ea81ca5d45 go1.22.4.linux-mips64.tar.gz
7486e2d7dd8c98eb44df815ace35a7fe7f30b7c02326e3741bd934077508139b go1.22.4.linux-mips64le.tar.gz
69479c8aad301e459a8365b40cad1074a0dbba5defb9291669f94809c4c4be6e go1.22.4.linux-mipsle.tar.gz
dd238847e65bc3e2745caca475a5db6522a2fcf85cf6c38fc36a06642b19efd7 go1.22.4.linux-ppc64.tar.gz
a3e5834657ef92523f570f798fed42f1f87bc18222a16815ec76b84169649ec4 go1.22.4.linux-ppc64le.tar.gz
56a827ff7dc6245bcd7a1e9288dffaa1d8b0fd7468562264c1523daf3b4f1b4a go1.22.4.linux-riscv64.tar.gz
7590c3e278e2dc6040aae0a39da3ca1eb2e3921673a7304cc34d588c45889eec go1.22.4.linux-s390x.tar.gz
ddd2eebe34471a2502de6c5dad04ab27c9fc80cbde7a9ad5b3c66ecec4504e1d go1.22.4.netbsd-386.tar.gz
33af79f6f935f6fbacc5d23876450b3567b79348fc065beef8e64081127dd234 go1.22.4.netbsd-amd64.tar.gz
fa3550ebd5375a70b3bcd342b5a71f4bd271dcbbfaf4eabefa2144ab5d8924b6 go1.22.4.netbsd-arm.tar.gz
c9a2971dec9f6d320c6f2b049b2353c6d0a2d35e87b8a4b2d78a2f0d62545f8e go1.22.4.netbsd-arm64.tar.gz
d21af022331bfdc2b5b161d616c3a1a4573d33cf7a30416ee509a8f3641deb47 go1.22.4.openbsd-386.tar.gz
72c0094c43f7e5722ec49c2a3e9dfa7a1123ac43a5f3a63eecf3e3795d3ff0ae go1.22.4.openbsd-amd64.tar.gz
1096831ea3c5ea3ca57d14251d9eda3786889531eb40d7d6775dcaa324d4b065 go1.22.4.openbsd-arm.tar.gz
a7ab8d4e0b02bf06ed144ba42c61c0e93ee00f2b433415dfd4ad4b6e79f31650 go1.22.4.openbsd-arm64.tar.gz
9716327c8a628358798898dc5148c49dbbeb5196bf2cbf088e550721a6e4f60b go1.22.4.openbsd-ppc64.tar.gz
a8dd4503c95c32a502a616ab78870a19889c9325fe9bd31eb16dd69346e4bfa8 go1.22.4.plan9-386.tar.gz
5423a25808d76fe5aca8607a2e5ac5673abf45446b168cb5e9d8519ee9fe39a1 go1.22.4.plan9-amd64.tar.gz
6af939ad583f5c85c09c53728ab7d38c3cc2b39167562d6c18a07c5c6608b370 go1.22.4.plan9-arm.tar.gz
e8cabe69c03085725afdb32a6f9998191a3e55a747b270d835fd05000d56abba go1.22.4.solaris-amd64.tar.gz
5c6446e2ea80bc6a971d2b34446f16e6517e638b0ff8d3ea229228d1931790b0 go1.22.4.windows-386.msi
aca4e2c37278a10f1c70dd0df142f7d66b50334fcee48978d409202d308d6d25 go1.22.4.windows-386.zip
3c21105d7b584759b6e266383b777caf6e87142d304a10b539dbc66ab482bb5f go1.22.4.windows-amd64.msi
26321c4d945a0035d8a5bc4a1965b0df401ff8ceac66ce2daadabf9030419a98 go1.22.4.windows-amd64.zip
c4303f02b864304eb83dd1db0b4ebf9d2ec9d216e7ef44a7657b166a52889c7f go1.22.4.windows-arm.msi
5fcd0671a49cecf39b41021621ee1b6e7aa1370f37122b72e80d4fd4185833b6 go1.22.4.windows-arm.zip
553cc6c460f4e3eb4fad5b897c0bb22cd8bbeb20929f0e3eeb939420320292ce go1.22.4.windows-arm64.msi
8a2daa9ea28cbdafddc6171aefed384f4e5b6e714fb52116fe9ed25a132f37ed go1.22.4.windows-arm64.zip
# version:golangci 1.55.2 # version:golangci 1.59.0
# https://github.com/golangci/golangci-lint/releases/ # https://github.com/golangci/golangci-lint/releases/
# https://github.com/golangci/golangci-lint/releases/download/v1.55.2/ # https://github.com/golangci/golangci-lint/releases/download/v1.59.0/
632e96e6d5294fbbe7b2c410a49c8fa01c60712a0af85a567de85bcc1623ea21 golangci-lint-1.55.2-darwin-amd64.tar.gz 418acf7e255ddc0783e97129c9b03d9311b77826a5311d425a01c708a86417e7 golangci-lint-1.59.0-darwin-amd64.tar.gz
234463f059249f82045824afdcdd5db5682d0593052f58f6a3039a0a1c3899f6 golangci-lint-1.55.2-darwin-arm64.tar.gz 5f6a1d95a6dd69f6e328eb56dd311a38e04cfab79a1305fbf4957f4e203f47b6 golangci-lint-1.59.0-darwin-arm64.tar.gz
2bdd105e2d4e003a9058c33a22bb191a1e0f30fa0790acca0d8fbffac1d6247c golangci-lint-1.55.2-freebsd-386.tar.gz 8899bf589185d49f747f3e5db9f0bde8a47245a100c64a3dd4d65e8e92cfc4f2 golangci-lint-1.59.0-freebsd-386.tar.gz
e75056e8b082386676ce23eba455cf893931a792c0d87e1e3743c0aec33c7fb5 golangci-lint-1.55.2-freebsd-amd64.tar.gz 658212f138d9df2ac89427e22115af34bf387c0871d70f2a25101718946a014f golangci-lint-1.59.0-freebsd-amd64.tar.gz
5789b933facaf6136bd23f1d50add67b79bbcf8dfdfc9069a37f729395940a66 golangci-lint-1.55.2-freebsd-armv6.tar.gz 4c6395ea40f314d3b6fa17d8997baab93464d5d1deeaab513155e625473bd03a golangci-lint-1.59.0-freebsd-armv6.tar.gz
7f21ab1008d05f32c954f99470fc86a83a059e530fe2add1d0b7d8ed4d8992a7 golangci-lint-1.55.2-freebsd-armv7.tar.gz ff37da4fbaacdb6bbae70fdbdbb1ba932a859956f788c82822fa06bef5b7c6b3 golangci-lint-1.59.0-freebsd-armv7.tar.gz
33ab06139b9219a28251f10821da94423db30285cc2af97494cbb2a281927de9 golangci-lint-1.55.2-illumos-amd64.tar.gz 439739469ed2bda182b1ec276d40c40e02f195537f78e3672996741ad223d6b6 golangci-lint-1.59.0-illumos-amd64.tar.gz
57ce6f8ce3ad6ee45d7cc3d9a047545a851c2547637834a3fcb086c7b40b1e6b golangci-lint-1.55.2-linux-386.tar.gz 940801d46790e40d0a097d8fee34e2606f0ef148cd039654029b0b8750a15ed6 golangci-lint-1.59.0-linux-386.tar.gz
ca21c961a33be3bc15e4292dc40c98c8dcc5463a7b6768a3afc123761630c09c golangci-lint-1.55.2-linux-amd64.tar.gz 3b14a439f33c4fff83dbe0349950d984042b9a1feb6c62f82787b598fc3ab5f4 golangci-lint-1.59.0-linux-amd64.tar.gz
8eb0cee9b1dbf0eaa49871798c7f8a5b35f2960c52d776a5f31eb7d886b92746 golangci-lint-1.55.2-linux-arm64.tar.gz c57e6c0b0fa03089a2611dceddd5bc5d206716cccdff8b149da8baac598719a1 golangci-lint-1.59.0-linux-arm64.tar.gz
3195f3e0f37d353fd5bd415cabcd4e263f5c29d3d0ffb176c26ff3d2c75eb3bb golangci-lint-1.55.2-linux-armv6.tar.gz 93149e2d3b25ac754df9a23172403d8aa6d021a7e0d9c090a12f51897f68c9a0 golangci-lint-1.59.0-linux-armv6.tar.gz
c823ee36eb1a719e171de1f2f5ca3068033dce8d9817232fd10ed71fd6650406 golangci-lint-1.55.2-linux-armv7.tar.gz d10ac38239d9efee3ee87b55c96cdf3fa09e1a525babe3ffdaaf65ccc48cf3dc golangci-lint-1.59.0-linux-armv7.tar.gz
758a5d2a356dc494bd13ed4c0d4bf5a54a4dc91267ea5ecdd87b86c7ca0624e7 golangci-lint-1.55.2-linux-loong64.tar.gz 047338114b4f0d5f08f0fb9a397b03cc171916ed0960be7dfb355c2320cd5e9c golangci-lint-1.59.0-linux-loong64.tar.gz
2c7b9abdce7cae802a67d583cd7c6dca520bff6d0e17c8535a918e2f2b437aa0 golangci-lint-1.55.2-linux-mips64.tar.gz 5632df0f7f8fc03a80a266130faef0b5902d280cf60621f1b2bdc1aef6d97ee9 golangci-lint-1.59.0-linux-mips64.tar.gz
024e0a15b85352cc27271285526e16a4ab66d3e67afbbe446c9808c06cb8dbed golangci-lint-1.55.2-linux-mips64le.tar.gz 71dd638c82fa4439171e7126d2c7a32b5d103bfdef282cea40c83632cb3d1f4b golangci-lint-1.59.0-linux-mips64le.tar.gz
6b00f89ba5506c1de1efdd9fa17c54093013a294fefd8b9b31534db626a672ee golangci-lint-1.55.2-linux-ppc64le.tar.gz 6cf9ea0d34e91669948483f9ae7f07da319a879344373a1981099fbd890cde00 golangci-lint-1.59.0-linux-ppc64le.tar.gz
0faa0d047d9bf7b703ed3ea65b6117043c93504f9ca1de25ae929d3901c73d4a golangci-lint-1.55.2-linux-riscv64.tar.gz af0205fa6fbab197cee613c359947711231739095d21b5c837086233b36ad971 golangci-lint-1.59.0-linux-riscv64.tar.gz
30dec9b22e7d5bb4e9d5ccea96da20f71cd7db3c8cf30b8ddc7cb9174c4d742a golangci-lint-1.55.2-linux-s390x.tar.gz a9d2fb93f3c688ebccef94f5dc96c0b07c4d20bf6556cddebd8442159b0c80f6 golangci-lint-1.59.0-linux-s390x.tar.gz
5a0ede48f79ad707902fdb29be8cd2abd8302dc122b65ebae3fdfc86751c7698 golangci-lint-1.55.2-netbsd-386.tar.gz 68ab4c57a847b8ace9679887f2f8b2b6760e57ee29dcde8c3f40dd8bb2654fa2 golangci-lint-1.59.0-netbsd-386.tar.gz
95af20a2e617126dd5b08122ece7819101070e1582a961067ce8c41172f901ad golangci-lint-1.55.2-netbsd-amd64.tar.gz d277b8b435c19406d00de4d509eadf5a024a5782878332e9a1b7c02bb76e87a7 golangci-lint-1.59.0-netbsd-amd64.tar.gz
94fb7dacb7527847cc95d7120904e19a2a0a81a0d50d61766c9e0251da72ab9d golangci-lint-1.55.2-netbsd-armv6.tar.gz 83211656be8dcfa1545af4f92894409f412d1f37566798cb9460a526593ad62c golangci-lint-1.59.0-netbsd-arm64.tar.gz
ca906bce5fee9619400e4a321c56476fe4a4efb6ac4fc989d340eb5563348873 golangci-lint-1.55.2-netbsd-armv7.tar.gz 6c6866d28bf79fa9817a0f7d2b050890ed109cae80bdb4dfa39536a7226da237 golangci-lint-1.59.0-netbsd-armv6.tar.gz
45b442f69fc8915c4500201c0247b7f3f69544dbc9165403a61f9095f2c57355 golangci-lint-1.55.2-windows-386.zip 11587566363bd03ca586b7df9776ccaed569fcd1f3489930ac02f9375b307503 golangci-lint-1.59.0-netbsd-armv7.tar.gz
f57d434d231d43417dfa631587522f8c1991220b43c8ffadb9c7bd279508bf81 golangci-lint-1.55.2-windows-amd64.zip 466181a8967bafa495e41494f93a0bec829c2cf715de874583b0460b3b8ae2b8 golangci-lint-1.59.0-windows-386.zip
fd7dc8f4c6829ee6fafb252a4d81d2155cd35da7833665cbb25d53ce7cecd990 golangci-lint-1.55.2-windows-arm64.zip 3317d8a87a99a49a0a1321d295c010790e6dbf43ee96b318f4b8bb23eae7a565 golangci-lint-1.59.0-windows-amd64.zip
1892c3c24f9e7ef44b02f6750c703864b6dc350129f3ec39510300007b2376f1 golangci-lint-1.55.2-windows-armv6.zip b3af955c7fceac8220a36fc799e1b3f19d3b247d32f422caac5f9845df8f7316 golangci-lint-1.59.0-windows-arm64.zip
a5e68ae73d38748b5269fad36ac7575e3c162a5dc63ef58abdea03cc5da4522a golangci-lint-1.55.2-windows-armv7.zip 6f083c7d0c764e5a0e5bde46ee3e91ae357d80c194190fe1d9754392e9064c7e golangci-lint-1.59.0-windows-armv6.zip
3709b4dd425deadab27748778d08e03c0f804d7748f7dd5b6bb488d98aa031c7 golangci-lint-1.59.0-windows-armv7.zip
# This is the builder on PPA that will build Go itself (inception-y), don't modify! # This is the builder on PPA that will build Go itself (inception-y), don't modify!
# #
# This version is fine to be old and full of security holes, we just use it # This version is fine to be old and full of security holes, we just use it
# to build the latest Go. Don't change it. If it ever becomes insufficient, # to build the latest Go. Don't change it.
# we need to switch over to a recursive builder to jump across supported
# versions.
# #
# version:ppa-builder 1.19.6 # version:ppa-builder-1 1.19.6
# https://go.dev/dl/ # https://go.dev/dl/
d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz d7f0013f82e6d7f862cc6cb5c8cdb48eef5f2e239b35baa97e2f1a7466043767 go1.19.6.src.tar.gz
# version:ppa-builder-2 1.21.9
# https://go.dev/dl/
58f0c5ced45a0012bce2ff7a9df03e128abcc8818ebabe5027bb92bafe20e421 go1.21.9.src.tar.gz

@ -117,24 +117,15 @@ var (
debEthereum, debEthereum,
} }
// Distros for which packages are created. // Distros for which packages are created
// Note: vivid is unsupported because there is no golang-1.6 package for it. debDistros = []string{
// Note: the following Ubuntu releases have been officially deprecated on Launchpad: "xenial", // 16.04, EOL: 04/2026
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite, impish, "bionic", // 18.04, EOL: 04/2028
// kinetic "focal", // 20.04, EOL: 04/2030
debDistroGoBoots = map[string]string{ "jammy", // 22.04, EOL: 04/2032
"trusty": "golang-1.11", // 14.04, EOL: 04/2024 "noble", // 24.04, EOL: 04/2034
"xenial": "golang-go", // 16.04, EOL: 04/2026
"bionic": "golang-go", // 18.04, EOL: 04/2028 "mantic", // 23.10, EOL: 07/2024
"focal": "golang-go", // 20.04, EOL: 04/2030
"jammy": "golang-go", // 22.04, EOL: 04/2032
"lunar": "golang-go", // 23.04, EOL: 01/2024
"mantic": "golang-go", // 23.10, EOL: 07/2024
}
debGoBootPaths = map[string]string{
"golang-1.11": "/usr/lib/go-1.11",
"golang-go": "/usr/lib/go",
} }
// This is where the tests should be unpacked. // This is where the tests should be unpacked.
@ -695,7 +686,7 @@ func doDebianSource(cmdline []string) {
} }
// Download and verify the Go source packages. // Download and verify the Go source packages.
var ( var (
gobootbundle = downloadGoBootstrapSources(*cachedir) gobootbundles = downloadGoBootstrapSources(*cachedir)
gobundle = downloadGoSources(*cachedir) gobundle = downloadGoSources(*cachedir)
) )
// Download all the dependencies needed to build the sources and run the ci script // Download all the dependencies needed to build the sources and run the ci script
@ -709,18 +700,20 @@ func doDebianSource(cmdline []string) {
// Create Debian packages and upload them. // Create Debian packages and upload them.
for _, pkg := range debPackages { for _, pkg := range debPackages {
for distro, goboot := range debDistroGoBoots { for _, distro := range debDistros {
// Prepare the debian package with the go-ethereum sources. // Prepare the debian package with the go-ethereum sources.
meta := newDebMetadata(distro, goboot, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables) meta := newDebMetadata(distro, *signer, env, now, pkg.Name, pkg.Version, pkg.Executables)
pkgdir := stageDebianSource(*workdir, meta) pkgdir := stageDebianSource(*workdir, meta)
// Add bootstrapper Go source code // Add bootstrapper Go source code
for i, gobootbundle := range gobootbundles {
if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil { if err := build.ExtractArchive(gobootbundle, pkgdir); err != nil {
log.Fatalf("Failed to extract bootstrapper Go sources: %v", err) log.Fatalf("Failed to extract bootstrapper Go sources: %v", err)
} }
if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, ".goboot")); err != nil { if err := os.Rename(filepath.Join(pkgdir, "go"), filepath.Join(pkgdir, fmt.Sprintf(".goboot-%d", i+1))); err != nil {
log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err) log.Fatalf("Failed to rename bootstrapper Go source folder: %v", err)
} }
}
// Add builder Go source code // Add builder Go source code
if err := build.ExtractArchive(gobundle, pkgdir); err != nil { if err := build.ExtractArchive(gobundle, pkgdir); err != nil {
log.Fatalf("Failed to extract builder Go sources: %v", err) log.Fatalf("Failed to extract builder Go sources: %v", err)
@ -755,11 +748,14 @@ func doDebianSource(cmdline []string) {
} }
} }
// downloadGoBootstrapSources downloads the Go source tarball that will be used // downloadGoBootstrapSources downloads the Go source tarball(s) that will be used
// to bootstrap the builder Go. // to bootstrap the builder Go.
func downloadGoBootstrapSources(cachedir string) string { func downloadGoBootstrapSources(cachedir string) []string {
csdb := build.MustLoadChecksums("build/checksums.txt") csdb := build.MustLoadChecksums("build/checksums.txt")
gobootVersion, err := build.Version(csdb, "ppa-builder")
var bundles []string
for _, booter := range []string{"ppa-builder-1", "ppa-builder-2"} {
gobootVersion, err := build.Version(csdb, booter)
if err != nil { if err != nil {
log.Fatal(err) log.Fatal(err)
} }
@ -769,7 +765,9 @@ func downloadGoBootstrapSources(cachedir string) string {
if err := csdb.DownloadFile(url, dst); err != nil { if err := csdb.DownloadFile(url, dst); err != nil {
log.Fatal(err) log.Fatal(err)
} }
return dst bundles = append(bundles, dst)
}
return bundles
} }
// downloadGoSources downloads the Go source tarball. // downloadGoSources downloads the Go source tarball.
@ -848,9 +846,6 @@ type debPackage struct {
type debMetadata struct { type debMetadata struct {
Env build.Environment Env build.Environment
GoBootPackage string
GoBootPath string
PackageName string PackageName string
// go-ethereum version being built. Note that this // go-ethereum version being built. Note that this
@ -878,14 +873,12 @@ func (d debExecutable) Package() string {
return d.BinaryName return d.BinaryName
} }
func newDebMetadata(distro, goboot, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata { func newDebMetadata(distro, author string, env build.Environment, t time.Time, name string, version string, exes []debExecutable) debMetadata {
if author == "" { if author == "" {
// No signing key, use default author. // No signing key, use default author.
author = "Ethereum Builds <fjl@ethereum.org>" author = "Ethereum Builds <fjl@ethereum.org>"
} }
return debMetadata{ return debMetadata{
GoBootPackage: goboot,
GoBootPath: debGoBootPaths[goboot],
PackageName: name, PackageName: name,
Env: env, Env: env,
Author: author, Author: author,

@ -2,7 +2,7 @@ Source: {{.Name}}
Section: science Section: science
Priority: extra Priority: extra
Maintainer: {{.Author}} Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}} Build-Depends: debhelper (>= 8.0.0), golang-go
Standards-Version: 3.9.5 Standards-Version: 3.9.5
Homepage: https://ethereum.org Homepage: https://ethereum.org
Vcs-Git: https://github.com/ethereum/go-ethereum.git Vcs-Git: https://github.com/ethereum/go-ethereum.git

@ -7,7 +7,7 @@
# Launchpad rejects Go's access to $HOME, use custom folders # Launchpad rejects Go's access to $HOME, use custom folders
export GOCACHE=/tmp/go-build export GOCACHE=/tmp/go-build
export GOPATH=/tmp/gopath export GOPATH=/tmp/gopath
export GOROOT_BOOTSTRAP={{.GoBootPath}} export GOROOT_BOOTSTRAP=/usr/lib/go
override_dh_auto_clean: override_dh_auto_clean:
# Don't try to be smart Launchpad, we know our build rules better than you # Don't try to be smart Launchpad, we know our build rules better than you
@ -19,8 +19,9 @@ override_dh_auto_build:
# #
# We're also shipping the bootstrapper as of Go 1.20 as it had minimum version # We're also shipping the bootstrapper as of Go 1.20 as it had minimum version
# requirements opposed to older versions of Go. # requirements opposed to older versions of Go.
(mv .goboot ../ && cd ../.goboot/src && ./make.bash) (mv .goboot-1 ../ && cd ../.goboot-1/src && ./make.bash)
(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot ./make.bash) (mv .goboot-2 ../ && cd ../.goboot-2/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-1 ./make.bash)
(mv .go ../ && cd ../.go/src && GOROOT_BOOTSTRAP=`pwd`/../../.goboot-2 ./make.bash)
# We can't download external go modules within Launchpad, so we're shipping the # We can't download external go modules within Launchpad, so we're shipping the
# entire dependency source cache with go-ethereum. # entire dependency source cache with go-ethereum.

@ -46,13 +46,12 @@ import (
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
"slices"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"text/template" "text/template"
"time" "time"
"golang.org/x/exp/slices"
) )
var ( var (
@ -292,8 +291,8 @@ func writeAuthors(files []string) {
} }
} }
// Write sorted list of authors back to the file. // Write sorted list of authors back to the file.
slices.SortFunc(list, func(a, b string) bool { slices.SortFunc(list, func(a, b string) int {
return strings.ToLower(a) < strings.ToLower(b) return strings.Compare(strings.ToLower(a), strings.ToLower(b))
}) })
content := new(bytes.Buffer) content := new(bytes.Buffer)
content.WriteString(authorsFileHeader) content.WriteString(authorsFileHeader)
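
For reference (not part of the diff): the move from `golang.org/x/exp/slices` to the Go 1.21 standard library changes the comparator contract from a less-than bool to a three-way int, which is why the author-sorting callback now goes through `strings.Compare`:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	authors := []string{"Zoe", "alice", "Bob"}
	// Standard-library slices.SortFunc takes a three-way comparator
	// (negative/zero/positive) instead of the old less-than bool.
	slices.SortFunc(authors, func(a, b string) int {
		return strings.Compare(strings.ToLower(a), strings.ToLower(b))
	})
	fmt.Println(authors) // [alice Bob Zoe]
}
```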

@ -0,0 +1,122 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"io"
"os"
"github.com/ethereum/go-ethereum/beacon/blsync"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
"github.com/urfave/cli/v2"
)
var (
verbosityFlag = &cli.IntFlag{
Name: "verbosity",
Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
Value: 3,
Category: flags.LoggingCategory,
}
vmoduleFlag = &cli.StringFlag{
Name: "vmodule",
Usage: "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)",
Value: "",
Hidden: true,
Category: flags.LoggingCategory,
}
)
func main() {
app := flags.NewApp("beacon light syncer tool")
app.Flags = []cli.Flag{
utils.BeaconApiFlag,
utils.BeaconApiHeaderFlag,
utils.BeaconThresholdFlag,
utils.BeaconNoFilterFlag,
utils.BeaconConfigFlag,
utils.BeaconGenesisRootFlag,
utils.BeaconGenesisTimeFlag,
utils.BeaconCheckpointFlag,
//TODO datadir for optional permanent database
utils.MainnetFlag,
utils.SepoliaFlag,
utils.GoerliFlag,
utils.BlsyncApiFlag,
utils.BlsyncJWTSecretFlag,
verbosityFlag,
vmoduleFlag,
}
app.Action = sync
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func sync(ctx *cli.Context) error {
usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
output := io.Writer(os.Stderr)
if usecolor {
output = colorable.NewColorable(os.Stderr)
}
verbosity := log.FromLegacyLevel(ctx.Int(verbosityFlag.Name))
log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, verbosity, usecolor)))
// set up blsync
client := blsync.NewClient(ctx)
client.SetEngineRPC(makeRPCClient(ctx))
client.Start()
// run until stopped
<-ctx.Done()
client.Stop()
return nil
}
func makeRPCClient(ctx *cli.Context) *rpc.Client {
if !ctx.IsSet(utils.BlsyncApiFlag.Name) {
log.Warn("No engine API target specified, performing a dry run")
return nil
}
if !ctx.IsSet(utils.BlsyncJWTSecretFlag.Name) {
utils.Fatalf("JWT secret parameter missing") //TODO use default if datadir is specified
}
engineApiUrl, jwtFileName := ctx.String(utils.BlsyncApiFlag.Name), ctx.String(utils.BlsyncJWTSecretFlag.Name)
var jwtSecret [32]byte
if jwt, err := node.ObtainJWTSecret(jwtFileName); err == nil {
copy(jwtSecret[:], jwt)
} else {
utils.Fatalf("Error loading or generating JWT secret: %v", err)
}
auth := node.NewJWTAuth(jwtSecret)
cl, err := rpc.DialOptions(context.Background(), engineApiUrl, rpc.WithHTTPAuth(auth))
if err != nil {
utils.Fatalf("Could not create RPC client: %v", err)
}
return cl
}

@ -225,7 +225,7 @@ Response
- `value` [number:optional]: amount of Wei to send with the transaction - `value` [number:optional]: amount of Wei to send with the transaction
- `data` [data:optional]: input data - `data` [data:optional]: input data
- `nonce` [number]: account nonce - `nonce` [number]: account nonce
1. method signature [string:optional] 2. method signature [string:optional]
- The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected. - The method signature, if present, is to aid decoding the calldata. Should consist of `methodname(paramtype,...)`, e.g. `transfer(uint256,address)`. The signer may use this data to parse the supplied calldata, and show the user. The data, however, is considered totally untrusted, and reliability is not expected.

@ -552,7 +552,7 @@ func listWallets(c *cli.Context) error {
// accountImport imports a raw hexadecimal private key via CLI. // accountImport imports a raw hexadecimal private key via CLI.
func accountImport(c *cli.Context) error { func accountImport(c *cli.Context) error {
if c.Args().Len() != 1 { if c.Args().Len() != 1 {
return errors.New("<keyfile> must be given as first argument.") return errors.New("<keyfile> must be given as first argument")
} }
internalApi, ui, err := initInternalApi(c) internalApi, ui, err := initInternalApi(c)
if err != nil { if err != nil {

@ -9,14 +9,14 @@ It enables usecases like the following:
The two main features that are required for this to work well are; The two main features that are required for this to work well are;
1. Rule Implementation: how to create, manage and interpret rules in a flexible but secure manner 1. Rule Implementation: how to create, manage, and interpret rules in a flexible but secure manner
2. Credential managements and credentials; how to provide auto-unlock without exposing keys unnecessarily. 2. Credential management and credentials; how to provide auto-unlock without exposing keys unnecessarily.
The section below deals with both of them The section below deals with both of them
## Rule Implementation ## Rule Implementation
A ruleset file is implemented as a `js` file. Under the hood, the ruleset-engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods A ruleset file is implemented as a `js` file. Under the hood, the ruleset engine is a `SignerUI`, implementing the same methods as the `json-rpc` methods
defined in the UI protocol. Example: defined in the UI protocol. Example:
```js ```js
@ -27,7 +27,7 @@ function asBig(str) {
return new BigNumber(str) return new BigNumber(str)
} }
// Approve transactions to a certain contract if value is below a certain limit // Approve transactions to a certain contract if the value is below a certain limit
function ApproveTx(req) { function ApproveTx(req) {
var limit = big.Newint("0xb1a2bc2ec50000") var limit = big.Newint("0xb1a2bc2ec50000")
var value = asBig(req.transaction.value); var value = asBig(req.transaction.value);
@ -70,7 +70,7 @@ The Otto vm has a few [caveats](https://github.com/robertkrimen/otto):
Additionally, a few more have been added Additionally, a few more have been added
* The rule execution cannot load external javascript files. * The rule execution cannot load external javascript files.
* The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This one is fairly old, and is not aligned with the documentation at the github repository. * The only preloaded library is [`bignumber.js`](https://github.com/MikeMcl/bignumber.js) version `2.0.3`. This one is fairly old, and is not aligned with the documentation at the GitHub repository.
* Each invocation is made in a fresh virtual machine. This means that you cannot store data in global variables between invocations. This is a deliberate choice -- if you want to store data, use the disk-backed `storage`, since rules should not rely on ephemeral data. * Each invocation is made in a fresh virtual machine. This means that you cannot store data in global variables between invocations. This is a deliberate choice -- if you want to store data, use the disk-backed `storage`, since rules should not rely on ephemeral data.
* Javascript API parameters are _always_ an object. This is also a design choice, to ensure that parameters are accessed by _key_ and not by order. This is to prevent mistakes due to missing parameters or parameter changes. * Javascript API parameters are _always_ an object. This is also a design choice, to ensure that parameters are accessed by _key_ and not by order. This is to prevent mistakes due to missing parameters or parameter changes.
* The JS engine has access to `storage` and `console`. * The JS engine has access to `storage` and `console`.
@ -88,8 +88,8 @@ Some security precautions can be made, such as:
##### Security of implementation ##### Security of implementation
The drawbacks of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement, since it's already The drawback of this very flexible solution is that the `signer` needs to contain a javascript engine. This is pretty simple to implement since it's already
implemented for `geth`. There are no known security vulnerabilities in, nor have we had any security-problems with it so far. implemented for `geth`. There are no known security vulnerabilities in it, nor have we had any security problems with it so far.
The javascript engine would be an added attack surface; but if the validation of `rulesets` is made good (with hash-based attestation), the actual javascript cannot be considered The javascript engine would be an added attack surface; but if the validation of `rulesets` is made good (with hash-based attestation), the actual javascript cannot be considered
an attack surface -- if an attacker can control the ruleset, a much simpler attack would be to implement an "always-approve" rule instead of exploiting the js vm. The only benefit an attack surface -- if an attacker can control the ruleset, a much simpler attack would be to implement an "always-approve" rule instead of exploiting the js vm. The only benefit
@ -105,7 +105,7 @@ It's unclear whether any other DSL could be more secure; since there's always th
## Credential management ## Credential management
The ability to auto-approve transaction means that the signer needs to have necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass). The ability to auto-approve transactions means that the signer needs to have the necessary credentials to decrypt keyfiles. These passwords are hereafter called `ksp` (keystore pass).
### Example implementation ### Example implementation
@ -127,8 +127,8 @@ The `vault.dat` would be an encrypted container storing the following informatio
### Security considerations ### Security considerations
This would leave it up to the user to ensure that the `path/to/masterseed` is handled in a secure way. It's difficult to get around this, although one could This would leave it up to the user to ensure that the `path/to/masterseed` is handled securely. It's difficult to get around this, although one could
imagine leveraging OS-level keychains where supported. The setup is however in general similar to how ssh-keys are stored in `.ssh/`. imagine leveraging OS-level keychains where supported. The setup is however, in general, similar to how ssh-keys are stored in `.ssh/`.
# Implementation status # Implementation status
@ -163,7 +163,7 @@ function isLimitOk(transaction) {
if (stored != "") { if (stored != "") {
txs = JSON.parse(stored) txs = JSON.parse(stored)
} }
// First, remove all that have passed out of the time-window // First, remove all that has passed out of the time window
var newtxs = txs.filter(function(tx){return tx.tstamp > windowstart}); var newtxs = txs.filter(function(tx){return tx.tstamp > windowstart});
console.log(txs, newtxs.length); console.log(txs, newtxs.length);
@ -174,7 +174,7 @@ function isLimitOk(transaction) {
console.log("ApproveTx > Sum so far", sum); console.log("ApproveTx > Sum so far", sum);
console.log("ApproveTx > Requested", value.toNumber()); console.log("ApproveTx > Requested", value.toNumber());
// Would we exceed weekly limit ? // Would we exceed the weekly limit ?
return sum.plus(value).lt(limit) return sum.plus(value).lt(limit)
} }

@ -20,6 +20,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net" "net"
"net/http"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -28,9 +29,11 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -45,6 +48,7 @@ var (
discv4ResolveJSONCommand, discv4ResolveJSONCommand,
discv4CrawlCommand, discv4CrawlCommand,
discv4TestCommand, discv4TestCommand,
discv4ListenCommand,
}, },
} }
discv4PingCommand = &cli.Command{ discv4PingCommand = &cli.Command{
@ -75,6 +79,14 @@ var (
Flags: discoveryNodeFlags, Flags: discoveryNodeFlags,
ArgsUsage: "<nodes.json file>", ArgsUsage: "<nodes.json file>",
} }
discv4ListenCommand = &cli.Command{
Name: "listen",
Usage: "Runs a discovery node",
Action: discv4Listen,
Flags: flags.Merge(discoveryNodeFlags, []cli.Flag{
httpAddrFlag,
}),
}
discv4CrawlCommand = &cli.Command{ discv4CrawlCommand = &cli.Command{
Name: "crawl", Name: "crawl",
Usage: "Updates a nodes.json file with random nodes found in the DHT", Usage: "Updates a nodes.json file with random nodes found in the DHT",
@ -131,6 +143,10 @@ var (
Usage: "Enode of the remote node under test", Usage: "Enode of the remote node under test",
EnvVars: []string{"REMOTE_ENODE"}, EnvVars: []string{"REMOTE_ENODE"},
} }
httpAddrFlag = &cli.StringFlag{
Name: "rpc",
Usage: "HTTP server listening address",
}
) )
var discoveryNodeFlags = []cli.Flag{ var discoveryNodeFlags = []cli.Flag{
@ -154,6 +170,27 @@ func discv4Ping(ctx *cli.Context) error {
return nil return nil
} }
func discv4Listen(ctx *cli.Context) error {
disc, _ := startV4(ctx)
defer disc.Close()
fmt.Println(disc.Self())
httpAddr := ctx.String(httpAddrFlag.Name)
if httpAddr == "" {
// Non-HTTP mode.
select {}
}
api := &discv4API{disc}
log.Info("Starting RPC API server", "addr", httpAddr)
srv := rpc.NewServer()
srv.RegisterName("discv4", api)
http.DefaultServeMux.Handle("/", srv)
httpsrv := http.Server{Addr: httpAddr, Handler: http.DefaultServeMux}
return httpsrv.ListenAndServe()
}
func discv4RequestRecord(ctx *cli.Context) error { func discv4RequestRecord(ctx *cli.Context) error {
n := getNodeArg(ctx) n := getNodeArg(ctx)
disc, _ := startV4(ctx) disc, _ := startV4(ctx)
@ -362,3 +399,23 @@ func parseBootnodes(ctx *cli.Context) ([]*enode.Node, error) {
} }
return nodes, nil return nodes, nil
} }
type discv4API struct {
host *discover.UDPv4
}
func (api *discv4API) LookupRandom(n int) (ns []*enode.Node) {
it := api.host.RandomNodes()
for len(ns) < n && it.Next() {
ns = append(ns, it.Node())
}
return ns
}
func (api *discv4API) Buckets() [][]discover.BucketNode {
return api.host.TableBuckets()
}
func (api *discv4API) Self() *enode.Node {
return api.host.Self()
}
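
Assuming go-ethereum's usual `<namespace>_<method>` RPC naming, the handlers registered above should be reachable as `discv4_self`, `discv4_lookupRandom` and `discv4_buckets`. A hedged client sketch; the listen address is a placeholder for whatever was passed to `--rpc`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder address; use the --rpc value given to `devp2p discv4 listen`.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var self enode.Node
	if err := client.CallContext(context.Background(), &self, "discv4_self"); err != nil {
		panic(err)
	}
	fmt.Println("self:", self.String())

	var found []*enode.Node
	if err := client.CallContext(context.Background(), &found, "discv4_lookupRandom", 16); err != nil {
		panic(err)
	}
	fmt.Println("lookup returned", len(found), "nodes")
}
```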

@ -20,6 +20,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"slices"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -32,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"golang.org/x/exp/slices"
) )
const ( const (

@ -183,8 +183,8 @@ var attrFormatters = map[string]func(rlp.RawValue) (string, bool){
} }
func formatAttrRaw(v rlp.RawValue) (string, bool) { func formatAttrRaw(v rlp.RawValue) (string, bool) {
s := hex.EncodeToString(v) content, _, err := rlp.SplitString(v)
return s, true return hex.EncodeToString(content), err == nil
} }
func formatAttrString(v rlp.RawValue) (string, bool) { func formatAttrString(v rlp.RawValue) (string, bool) {
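
Small aside on the `formatAttrRaw` change: `rlp.SplitString` strips the RLP string header so only the payload bytes are hex-printed, and malformed values are flagged instead of being silently dumped. A minimal illustration:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// An ENR attribute value arrives as an RLP string; SplitString separates
	// the header from the content bytes.
	raw, _ := rlp.EncodeToBytes([]byte{0xde, 0xad, 0xbe, 0xef})
	content, _, err := rlp.SplitString(raw)
	fmt.Println(hex.EncodeToString(raw), hex.EncodeToString(content), err)
	// 84deadbeef deadbeef <nil>
}
```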

@ -26,7 +26,8 @@ import (
"io" "io"
"math/big" "math/big"
"os" "os"
"path" "path/filepath"
"slices"
"sort" "sort"
"strings" "strings"
@ -40,7 +41,6 @@ import (
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"golang.org/x/exp/slices"
) )
// Chain is a lightweight blockchain-like store which can read a hivechain // Chain is a lightweight blockchain-like store which can read a hivechain
@ -56,21 +56,21 @@ type Chain struct {
// NewChain takes the given chain.rlp file, and decodes and returns // NewChain takes the given chain.rlp file, and decodes and returns
// the blocks from the file. // the blocks from the file.
func NewChain(dir string) (*Chain, error) { func NewChain(dir string) (*Chain, error) {
gen, err := loadGenesis(path.Join(dir, "genesis.json")) gen, err := loadGenesis(filepath.Join(dir, "genesis.json"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
gblock := gen.ToBlock() gblock := gen.ToBlock()
blocks, err := blocksFromFile(path.Join(dir, "chain.rlp"), gblock) blocks, err := blocksFromFile(filepath.Join(dir, "chain.rlp"), gblock)
if err != nil { if err != nil {
return nil, err return nil, err
} }
state, err := readState(path.Join(dir, "headstate.json")) state, err := readState(filepath.Join(dir, "headstate.json"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
accounts, err := readAccounts(path.Join(dir, "accounts.json")) accounts, err := readAccounts(filepath.Join(dir, "accounts.json"))
if err != nil { if err != nil {
return nil, err return nil, err
} }

@ -53,7 +53,8 @@ func (s *Suite) dial() (*Conn, error) {
// dialAs attempts to dial a given node and perform a handshake using the given // dialAs attempts to dial a given node and perform a handshake using the given
// private key. // private key.
func (s *Suite) dialAs(key *ecdsa.PrivateKey) (*Conn, error) { func (s *Suite) dialAs(key *ecdsa.PrivateKey) (*Conn, error) {
fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) tcpEndpoint, _ := s.Dest.TCPEndpoint()
fd, err := net.Dial("tcp", tcpEndpoint.String())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -166,7 +167,7 @@ func (c *Conn) ReadEth() (any, error) {
case eth.TransactionsMsg: case eth.TransactionsMsg:
msg = new(eth.TransactionsPacket) msg = new(eth.TransactionsPacket)
case eth.NewPooledTransactionHashesMsg: case eth.NewPooledTransactionHashesMsg:
msg = new(eth.NewPooledTransactionHashesPacket68) msg = new(eth.NewPooledTransactionHashesPacket)
case eth.GetPooledTransactionsMsg: case eth.GetPooledTransactionsMsg:
msg = new(eth.GetPooledTransactionsPacket) msg = new(eth.GetPooledTransactionsPacket)
case eth.PooledTransactionsMsg: case eth.PooledTransactionsMsg:

@ -22,7 +22,7 @@ import (
"io" "io"
"net/http" "net/http"
"os" "os"
"path" "path/filepath"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -38,7 +38,7 @@ type EngineClient struct {
// NewEngineClient creates a new engine client. // NewEngineClient creates a new engine client.
func NewEngineClient(dir, url, jwt string) (*EngineClient, error) { func NewEngineClient(dir, url, jwt string) (*EngineClient, error) {
headfcu, err := os.ReadFile(path.Join(dir, "headfcu.json")) headfcu, err := os.ReadFile(filepath.Join(dir, "headfcu.json"))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read headfcu: %w", err) return nil, fmt.Errorf("failed to read headfcu: %w", err)
} }

@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode" "github.com/ethereum/go-ethereum/trie/trienode"
"golang.org/x/crypto/sha3"
) )
func (c *Conn) snapRequest(code uint64, msg any) (any, error) { func (c *Conn) snapRequest(code uint64, msg any) (any, error) {
@ -648,7 +647,7 @@ The server should reject the request.`,
0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}}, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
}, },
nBytes: 5000, nBytes: 5000,
expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")}, expHashes: []common.Hash{types.EmptyCodeHash},
}, },
{ {
@ -905,7 +904,7 @@ func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
// that the serving node is missing // that the serving node is missing
var ( var (
bytecodes = res.Codes bytecodes = res.Codes
hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState) hasher = crypto.NewKeccakState()
hash = make([]byte, 32) hash = make([]byte, 32)
codes = make([][]byte, len(req.Hashes)) codes = make([][]byte, len(req.Hashes))
) )
@ -964,7 +963,7 @@ func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
// Cross reference the requested trienodes with the response to find gaps // Cross reference the requested trienodes with the response to find gaps
// that the serving node is missing // that the serving node is missing
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) hasher := crypto.NewKeccakState()
hash := make([]byte, 32) hash := make([]byte, 32)
trienodes := res.Nodes trienodes := res.Nodes
if got, want := len(trienodes), len(tc.expHashes); got != want { if got, want := len(trienodes), len(tc.expHashes); got != want {
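crypto.NewKeccakState replaces the sha3.NewLegacyKeccak256 type assertion in both snap helpers, and the literal 0xc5d2...a470 hash above gives way to the named constant types.EmptyCodeHash. A minimal sketch tying the two together, assuming go-ethereum's crypto and core/types packages:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	hasher := crypto.NewKeccakState() // no interface assertion needed
	hash := make([]byte, 32)
	hasher.Write([]byte{}) // keccak256 of empty bytecode
	hasher.Read(hash)      // KeccakState exposes Read, avoiding the Sum copy
	fmt.Println(bytes.Equal(hash, types.EmptyCodeHash.Bytes())) // true
}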

@ -64,23 +64,23 @@ func NewSuite(dest *enode.Node, chainDir, engineURL, jwt string) (*Suite, error)
func (s *Suite) EthTests() []utesting.Test { func (s *Suite) EthTests() []utesting.Test {
return []utesting.Test{ return []utesting.Test{
// status // status
{Name: "TestStatus", Fn: s.TestStatus}, {Name: "Status", Fn: s.TestStatus},
// get block headers // get block headers
{Name: "TestGetBlockHeaders", Fn: s.TestGetBlockHeaders}, {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders},
{Name: "TestSimultaneousRequests", Fn: s.TestSimultaneousRequests}, {Name: "SimultaneousRequests", Fn: s.TestSimultaneousRequests},
{Name: "TestSameRequestID", Fn: s.TestSameRequestID}, {Name: "SameRequestID", Fn: s.TestSameRequestID},
{Name: "TestZeroRequestID", Fn: s.TestZeroRequestID}, {Name: "ZeroRequestID", Fn: s.TestZeroRequestID},
// get block bodies // get block bodies
{Name: "TestGetBlockBodies", Fn: s.TestGetBlockBodies}, {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies},
// // malicious handshakes + status // // malicious handshakes + status
{Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, {Name: "MaliciousHandshake", Fn: s.TestMaliciousHandshake},
{Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, {Name: "MaliciousStatus", Fn: s.TestMaliciousStatus},
// test transactions // test transactions
{Name: "TestLargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true}, {Name: "LargeTxRequest", Fn: s.TestLargeTxRequest, Slow: true},
{Name: "TestTransaction", Fn: s.TestTransaction}, {Name: "Transaction", Fn: s.TestTransaction},
{Name: "TestInvalidTxs", Fn: s.TestInvalidTxs}, {Name: "InvalidTxs", Fn: s.TestInvalidTxs},
{Name: "TestNewPooledTxs", Fn: s.TestNewPooledTxs}, {Name: "NewPooledTxs", Fn: s.TestNewPooledTxs},
{Name: "TestBlobViolations", Fn: s.TestBlobViolations}, {Name: "BlobViolations", Fn: s.TestBlobViolations},
} }
} }
@ -94,9 +94,9 @@ func (s *Suite) SnapTests() []utesting.Test {
} }
} }
// TestStatus attempts to connect to the given node and exchange a status
// message with it on the eth protocol.
func (s *Suite) TestStatus(t *utesting.T) { func (s *Suite) TestStatus(t *utesting.T) {
t.Log(`This test is just a sanity check. It performs an eth protocol handshake.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -112,9 +112,9 @@ func headersMatch(expected []*types.Header, headers []*types.Header) bool {
return reflect.DeepEqual(expected, headers) return reflect.DeepEqual(expected, headers)
} }
// TestGetBlockHeaders tests whether the given node can respond to an eth
// `GetBlockHeaders` request and that the response is accurate.
func (s *Suite) TestGetBlockHeaders(t *utesting.T) { func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
t.Log(`This test requests block headers from the node.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -154,10 +154,10 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) {
} }
} }
// TestSimultaneousRequests sends two simultaneous `GetBlockHeader` requests
// from the same connection with different request IDs and checks to make sure
// the node responds with the correct headers per request.
func (s *Suite) TestSimultaneousRequests(t *utesting.T) { func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
t.Log(`This test requests block headers from the node, performing two requests
concurrently, with different request IDs.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -228,9 +228,10 @@ func (s *Suite) TestSimultaneousRequests(t *utesting.T) {
} }
} }
// TestSameRequestID sends two requests with the same request ID to a single
// node.
func (s *Suite) TestSameRequestID(t *utesting.T) { func (s *Suite) TestSameRequestID(t *utesting.T) {
t.Log(`This test requests block headers, performing two concurrent requests with the
same request ID. The node should handle the request by responding to both requests.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -298,9 +299,10 @@ func (s *Suite) TestSameRequestID(t *utesting.T) {
} }
} }
// TestZeroRequestID checks that a message with a request ID of zero is still handled
// by the node.
func (s *Suite) TestZeroRequestID(t *utesting.T) { func (s *Suite) TestZeroRequestID(t *utesting.T) {
t.Log(`This test sends a GetBlockHeaders message with a request-id of zero,
and expects a response.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -333,9 +335,9 @@ func (s *Suite) TestZeroRequestID(t *utesting.T) {
} }
} }
// TestGetBlockBodies tests whether the given node can respond to a
// `GetBlockBodies` request and that the response is accurate.
func (s *Suite) TestGetBlockBodies(t *utesting.T) { func (s *Suite) TestGetBlockBodies(t *utesting.T) {
t.Log(`This test sends GetBlockBodies requests to the node for known blocks in the test chain.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -376,12 +378,12 @@ func randBuf(size int) []byte {
return buf return buf
} }
// TestMaliciousHandshake tries to send malicious data during the handshake.
func (s *Suite) TestMaliciousHandshake(t *utesting.T) { func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
key, _ := crypto.GenerateKey() t.Log(`This test tries to send malicious data during the devp2p handshake, in various ways.`)
// Write hello to client. // Write hello to client.
var ( var (
key, _ = crypto.GenerateKey()
pub0 = crypto.FromECDSAPub(&key.PublicKey)[1:] pub0 = crypto.FromECDSAPub(&key.PublicKey)[1:]
version = eth.ProtocolVersions[0] version = eth.ProtocolVersions[0]
) )
@ -451,8 +453,9 @@ func (s *Suite) TestMaliciousHandshake(t *utesting.T) {
} }
} }
// TestMaliciousStatus sends a status package with a large total difficulty.
func (s *Suite) TestMaliciousStatus(t *utesting.T) { func (s *Suite) TestMaliciousStatus(t *utesting.T) {
t.Log(`This test sends a malicious eth Status message to the node and expects a disconnect.`)
conn, err := s.dial() conn, err := s.dial()
if err != nil { if err != nil {
t.Fatalf("dial failed: %v", err) t.Fatalf("dial failed: %v", err)
@ -486,9 +489,10 @@ func (s *Suite) TestMaliciousStatus(t *utesting.T) {
} }
} }
// TestTransaction sends a valid transaction to the node and checks if the
// transaction gets propagated.
func (s *Suite) TestTransaction(t *utesting.T) { func (s *Suite) TestTransaction(t *utesting.T) {
t.Log(`This test sends a valid transaction to the node and checks if the
transaction gets propagated.`)
// Nudge client out of syncing mode to accept pending txs. // Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil { if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
@ -507,15 +511,16 @@ func (s *Suite) TestTransaction(t *utesting.T) {
if err != nil { if err != nil {
t.Fatalf("failed to sign tx: %v", err) t.Fatalf("failed to sign tx: %v", err)
} }
if err := s.sendTxs([]*types.Transaction{tx}); err != nil { if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
t.Fatal(err) t.Fatal(err)
} }
s.chain.IncNonce(from, 1) s.chain.IncNonce(from, 1)
} }
// TestInvalidTxs sends several invalid transactions and tests whether
// the node will propagate them.
func (s *Suite) TestInvalidTxs(t *utesting.T) { func (s *Suite) TestInvalidTxs(t *utesting.T) {
t.Log(`This test sends several kinds of invalid transactions and checks that the node
does not propagate them.`)
// Nudge client out of syncing mode to accept pending txs. // Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil { if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
@ -534,7 +539,7 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
if err != nil { if err != nil {
t.Fatalf("failed to sign tx: %v", err) t.Fatalf("failed to sign tx: %v", err)
} }
if err := s.sendTxs([]*types.Transaction{tx}); err != nil { if err := s.sendTxs(t, []*types.Transaction{tx}); err != nil {
t.Fatalf("failed to send txs: %v", err) t.Fatalf("failed to send txs: %v", err)
} }
s.chain.IncNonce(from, 1) s.chain.IncNonce(from, 1)
@ -590,14 +595,15 @@ func (s *Suite) TestInvalidTxs(t *utesting.T) {
} }
txs = append(txs, tx) txs = append(txs, tx)
} }
if err := s.sendInvalidTxs(txs); err != nil { if err := s.sendInvalidTxs(t, txs); err != nil {
t.Fatalf("failed to send invalid txs: %v", err) t.Fatalf("failed to send invalid txs: %v", err)
} }
} }
// TestLargeTxRequest tests whether a node can fulfill a large GetPooledTransactions
// request.
func (s *Suite) TestLargeTxRequest(t *utesting.T) { func (s *Suite) TestLargeTxRequest(t *utesting.T) {
t.Log(`This test first sends ~2000 transactions to the node, then requests them
on another peer connection using GetPooledTransactions.`)
// Nudge client out of syncing mode to accept pending txs. // Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil { if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
@ -630,7 +636,7 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
s.chain.IncNonce(from, uint64(count)) s.chain.IncNonce(from, uint64(count))
// Send txs. // Send txs.
if err := s.sendTxs(txs); err != nil { if err := s.sendTxs(t, txs); err != nil {
t.Fatalf("failed to send txs: %v", err) t.Fatalf("failed to send txs: %v", err)
} }
@ -667,13 +673,15 @@ func (s *Suite) TestLargeTxRequest(t *utesting.T) {
} }
} }
// TestNewPooledTxs tests whether a node will do a GetPooledTransactions request
// upon receiving a NewPooledTransactionHashes announcement.
func (s *Suite) TestNewPooledTxs(t *utesting.T) { func (s *Suite) TestNewPooledTxs(t *utesting.T) {
t.Log(`This test announces transaction hashes to the node and expects it to fetch
the transactions using a GetPooledTransactions request.`)
// Nudge client out of syncing mode to accept pending txs. // Nudge client out of syncing mode to accept pending txs.
if err := s.engine.sendForkchoiceUpdated(); err != nil { if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("failed to send next block: %v", err) t.Fatalf("failed to send next block: %v", err)
} }
var ( var (
count = 50 count = 50
from, nonce = s.chain.GetSender(1) from, nonce = s.chain.GetSender(1)
@ -710,7 +718,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
} }
// Send announcement. // Send announcement.
ann := eth.NewPooledTransactionHashesPacket68{Types: txTypes, Sizes: sizes, Hashes: hashes} ann := eth.NewPooledTransactionHashesPacket{Types: txTypes, Sizes: sizes, Hashes: hashes}
err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann) err = conn.Write(ethProto, eth.NewPooledTransactionHashesMsg, ann)
if err != nil { if err != nil {
t.Fatalf("failed to write to connection: %v", err) t.Fatalf("failed to write to connection: %v", err)
@ -728,7 +736,7 @@ func (s *Suite) TestNewPooledTxs(t *utesting.T) {
t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest)) t.Fatalf("unexpected number of txs requested: wanted %d, got %d", len(hashes), len(msg.GetPooledTransactionsRequest))
} }
return return
case *eth.NewPooledTransactionHashesPacket68: case *eth.NewPooledTransactionHashesPacket:
continue continue
case *eth.TransactionsPacket: case *eth.TransactionsPacket:
continue continue
@ -746,8 +754,8 @@ func makeSidecar(data ...byte) *types.BlobTxSidecar {
) )
for i := range blobs { for i := range blobs {
blobs[i][0] = data[i] blobs[i][0] = data[i]
c, _ := kzg4844.BlobToCommitment(blobs[i]) c, _ := kzg4844.BlobToCommitment(&blobs[i])
p, _ := kzg4844.ComputeBlobProof(blobs[i], c) p, _ := kzg4844.ComputeBlobProof(&blobs[i], c)
commitments = append(commitments, c) commitments = append(commitments, c)
proofs = append(proofs, p) proofs = append(proofs, p)
} }
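kzg4844.BlobToCommitment and kzg4844.ComputeBlobProof now take *kzg4844.Blob, so the 128 KiB blob is no longer copied by value on each call. A minimal sketch with the pointer-taking signatures; the helper name is made up for illustration:

// commitAndProve computes the KZG commitment and proof for one blob.
func commitAndProve(blob *kzg4844.Blob) (kzg4844.Commitment, kzg4844.Proof, error) {
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return kzg4844.Commitment{}, kzg4844.Proof{}, err
	}
	p, err := kzg4844.ComputeBlobProof(blob, c)
	if err != nil {
		return kzg4844.Commitment{}, kzg4844.Proof{}, err
	}
	return c, p, nil
}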
@ -762,7 +770,7 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
from, nonce := s.chain.GetSender(5) from, nonce := s.chain.GetSender(5)
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
// Make blob data, max of 2 blobs per tx. // Make blob data, max of 2 blobs per tx.
blobdata := make([]byte, blobs%2) blobdata := make([]byte, blobs%3)
for i := range blobdata { for i := range blobdata {
blobdata[i] = discriminator blobdata[i] = discriminator
blobs -= 1 blobs -= 1
@ -787,6 +795,8 @@ func (s *Suite) makeBlobTxs(count, blobs int, discriminator byte) (txs types.Tra
} }
func (s *Suite) TestBlobViolations(t *utesting.T) { func (s *Suite) TestBlobViolations(t *utesting.T) {
t.Log(`This test sends some invalid blob tx announcements and expects the node to disconnect.`)
if err := s.engine.sendForkchoiceUpdated(); err != nil { if err := s.engine.sendForkchoiceUpdated(); err != nil {
t.Fatalf("send fcu failed: %v", err) t.Fatalf("send fcu failed: %v", err)
} }
@ -796,12 +806,12 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
t2 = s.makeBlobTxs(2, 3, 0x2) t2 = s.makeBlobTxs(2, 3, 0x2)
) )
for _, test := range []struct { for _, test := range []struct {
ann eth.NewPooledTransactionHashesPacket68 ann eth.NewPooledTransactionHashesPacket
resp eth.PooledTransactionsResponse resp eth.PooledTransactionsResponse
}{ }{
// Invalid tx size. // Invalid tx size.
{ {
ann: eth.NewPooledTransactionHashesPacket68{ ann: eth.NewPooledTransactionHashesPacket{
Types: []byte{types.BlobTxType, types.BlobTxType}, Types: []byte{types.BlobTxType, types.BlobTxType},
Sizes: []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)}, Sizes: []uint32{uint32(t1[0].Size()), uint32(t1[1].Size() + 10)},
Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()}, Hashes: []common.Hash{t1[0].Hash(), t1[1].Hash()},
@ -810,7 +820,7 @@ func (s *Suite) TestBlobViolations(t *utesting.T) {
}, },
// Wrong tx type. // Wrong tx type.
{ {
ann: eth.NewPooledTransactionHashesPacket68{ ann: eth.NewPooledTransactionHashesPacket{
Types: []byte{types.DynamicFeeTxType, types.BlobTxType}, Types: []byte{types.DynamicFeeTxType, types.BlobTxType},
Sizes: []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())}, Sizes: []uint32{uint32(t2[0].Size()), uint32(t2[1].Size())},
Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()}, Hashes: []common.Hash{t2[0].Hash(), t2[1].Hash()},

@ -20,7 +20,7 @@ import (
crand "crypto/rand" crand "crypto/rand"
"fmt" "fmt"
"os" "os"
"path" "path/filepath"
"testing" "testing"
"time" "time"
@ -39,7 +39,7 @@ func makeJWTSecret() (string, [32]byte, error) {
if _, err := crand.Read(secret[:]); err != nil { if _, err := crand.Read(secret[:]); err != nil {
return "", secret, fmt.Errorf("failed to create jwt secret: %v", err) return "", secret, fmt.Errorf("failed to create jwt secret: %v", err)
} }
jwtPath := path.Join(os.TempDir(), "jwt_secret") jwtPath := filepath.Join(os.TempDir(), "jwt_secret")
if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil { if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil {
return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err) return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err)
} }

@ -25,11 +25,12 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/internal/utesting"
) )
// sendTxs sends the given transactions to the node and // sendTxs sends the given transactions to the node and
// expects the node to accept and propagate them. // expects the node to accept and propagate them.
func (s *Suite) sendTxs(txs []*types.Transaction) error { func (s *Suite) sendTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn. // Open sending conn.
sendConn, err := s.dial() sendConn, err := s.dial()
if err != nil { if err != nil {
@ -70,10 +71,19 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
for _, tx := range *msg { for _, tx := range *msg {
got[tx.Hash()] = true got[tx.Hash()] = true
} }
case *eth.NewPooledTransactionHashesPacket68: case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes { for _, hash := range msg.Hashes {
got[hash] = true got[hash] = true
} }
case *eth.GetBlockHeadersPacket:
headers, err := s.chain.GetHeaders(msg)
if err != nil {
t.Logf("invalid GetBlockHeaders request: %v", err)
}
recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
RequestId: msg.RequestId,
BlockHeadersRequest: headers,
})
default: default:
return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg)) return fmt.Errorf("unexpected eth wire msg: %s", pretty.Sdump(msg))
} }
@ -92,10 +102,10 @@ func (s *Suite) sendTxs(txs []*types.Transaction) error {
} }
} }
return fmt.Errorf("timed out waiting for txs") return errors.New("timed out waiting for txs")
} }
func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error { func (s *Suite) sendInvalidTxs(t *utesting.T, txs []*types.Transaction) error {
// Open sending conn. // Open sending conn.
sendConn, err := s.dial() sendConn, err := s.dial()
if err != nil { if err != nil {
@ -146,12 +156,21 @@ func (s *Suite) sendInvalidTxs(txs []*types.Transaction) error {
return fmt.Errorf("received bad tx: %s", tx.Hash()) return fmt.Errorf("received bad tx: %s", tx.Hash())
} }
} }
case *eth.NewPooledTransactionHashesPacket68: case *eth.NewPooledTransactionHashesPacket:
for _, hash := range msg.Hashes { for _, hash := range msg.Hashes {
if _, ok := invalids[hash]; ok { if _, ok := invalids[hash]; ok {
return fmt.Errorf("received bad tx: %s", hash) return fmt.Errorf("received bad tx: %s", hash)
} }
} }
case *eth.GetBlockHeadersPacket:
headers, err := s.chain.GetHeaders(msg)
if err != nil {
t.Logf("invalid GetBlockHeaders request: %v", err)
}
recvConn.Write(ethProto, eth.BlockHeadersMsg, &eth.BlockHeadersPacket{
RequestId: msg.RequestId,
BlockHeadersRequest: headers,
})
default: default:
return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg)) return fmt.Errorf("unexpected eth message: %v", pretty.Sdump(msg))
} }

@ -53,16 +53,18 @@ func newTestEnv(remote string, listen1, listen2 string) *testenv {
if err != nil { if err != nil {
panic(err) panic(err)
} }
if node.IP() == nil || node.UDP() == 0 { if !node.IPAddr().IsValid() || node.UDP() == 0 {
var ip net.IP var ip net.IP
var tcpPort, udpPort int var tcpPort, udpPort int
if ip = node.IP(); ip == nil { if node.IPAddr().IsValid() {
ip = node.IPAddr().AsSlice()
} else {
ip = net.ParseIP("127.0.0.1") ip = net.ParseIP("127.0.0.1")
} }
if tcpPort = node.TCP(); tcpPort == 0 { if tcpPort = node.TCP(); tcpPort == 0 {
tcpPort = 30303 tcpPort = 30303
} }
if udpPort = node.TCP(); udpPort == 0 { if udpPort = node.UDP(); udpPort == 0 {
udpPort = 30303 udpPort = 30303
} }
node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort) node = enode.NewV4(node.Pubkey(), ip, tcpPort, udpPort)
@ -110,7 +112,7 @@ func (te *testenv) localEndpoint(c net.PacketConn) v4wire.Endpoint {
} }
func (te *testenv) remoteEndpoint() v4wire.Endpoint { func (te *testenv) remoteEndpoint() v4wire.Endpoint {
return v4wire.NewEndpoint(te.remoteAddr, 0) return v4wire.NewEndpoint(te.remoteAddr.AddrPort(), 0)
} }
func contains(ns []v4wire.Node, key v4wire.Pubkey) bool { func contains(ns []v4wire.Node, key v4wire.Pubkey) bool {

@ -19,6 +19,7 @@ package v5test
import ( import (
"bytes" "bytes"
"net" "net"
"slices"
"sync" "sync"
"time" "time"
@ -266,7 +267,7 @@ func (s *Suite) TestFindnodeResults(t *utesting.T) {
n := bn.conn.localNode.Node() n := bn.conn.localNode.Node()
expect[n.ID()] = n expect[n.ID()] = n
d := uint(enode.LogDist(n.ID(), s.Dest.ID())) d := uint(enode.LogDist(n.ID(), s.Dest.ID()))
if !containsUint(dists, d) { if !slices.Contains(dists, d) {
dists = append(dists, d) dists = append(dists, d)
} }
} }

@ -252,12 +252,3 @@ func checkRecords(records []*enr.Record) ([]*enode.Node, error) {
} }
return nodes, nil return nodes, nil
} }
func containsUint(ints []uint, x uint) bool {
for i := range ints {
if ints[i] == x {
return true
}
}
return false
}
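The hand-rolled containsUint helper removed above is covered by slices.Contains from the Go 1.21 standard library; a later hunk in this diff makes the matching switch from golang.org/x/exp/slices to the stdlib package. A minimal sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	dists := []uint{253, 254, 255}
	fmt.Println(slices.Contains(dists, uint(254))) // true
	fmt.Println(slices.Contains(dists, uint(9)))   // false
}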

@ -66,11 +66,17 @@ func commandHasFlag(ctx *cli.Context, flag cli.Flag) bool {
for _, name := range names { for _, name := range names {
set[name] = struct{}{} set[name] = struct{}{}
} }
for _, fn := range ctx.FlagNames() { for _, ctx := range ctx.Lineage() {
if _, ok := set[fn]; ok { if ctx.Command != nil {
for _, f := range ctx.Command.Flags {
for _, name := range f.Names() {
if _, ok := set[name]; ok {
return true return true
} }
} }
}
}
}
return false return false
} }

@ -21,11 +21,11 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os" "os"
"slices"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enode"
"golang.org/x/exp/slices"
) )
const jsonIndent = " " const jsonIndent = " "

@ -19,7 +19,7 @@ package main
import ( import (
"errors" "errors"
"fmt" "fmt"
"net" "net/netip"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -205,11 +205,11 @@ func trueFilter(args []string) (nodeFilter, error) {
} }
func ipFilter(args []string) (nodeFilter, error) { func ipFilter(args []string) (nodeFilter, error) {
_, cidr, err := net.ParseCIDR(args[0]) prefix, err := netip.ParsePrefix(args[0])
if err != nil { if err != nil {
return nil, err return nil, err
} }
f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) } f := func(n nodeJSON) bool { return prefix.Contains(n.N.IPAddr()) }
return f, nil return f, nil
} }
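ipFilter moves from net.ParseCIDR to the value-based net/netip types, matching the enode.Node.IPAddr accessor used in the filter closure. A minimal sketch of the prefix check:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	prefix, err := netip.ParsePrefix("10.0.0.0/8")
	if err != nil {
		panic(err)
	}
	fmt.Println(prefix.Contains(netip.MustParseAddr("10.1.2.3")))  // true
	fmt.Println(prefix.Contains(netip.MustParseAddr("192.0.2.1"))) // false
}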

@ -77,7 +77,11 @@ var (
func rlpxPing(ctx *cli.Context) error { func rlpxPing(ctx *cli.Context) error {
n := getNodeArg(ctx) n := getNodeArg(ctx)
fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", n.IP(), n.TCP())) tcpEndpoint, ok := n.TCPEndpoint()
if !ok {
return fmt.Errorf("node has no TCP endpoint")
}
fd, err := net.Dial("tcp", tcpEndpoint.String())
if err != nil { if err != nil {
return err return err
} }
@ -105,7 +109,7 @@ func rlpxPing(ctx *cli.Context) error {
} }
return fmt.Errorf("received disconnect message: %v", msg[0]) return fmt.Errorf("received disconnect message: %v", msg[0])
default: default:
return fmt.Errorf("invalid message code %d, expected handshake (code zero)", code) return fmt.Errorf("invalid message code %d, expected handshake (code zero) or disconnect (code one)", code)
} }
return nil return nil
} }
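Both this hunk and the earlier dialAs change drop the fmt.Sprintf("%v:%d", IP, TCP) pattern in favour of the node's TCPEndpoint accessor, which returns the endpoint together with an ok flag for nodes that advertise no usable TCP address. A minimal sketch of the dialing pattern, assuming an *enode.Node value:

func dialNode(n *enode.Node) (net.Conn, error) {
	tcpEndpoint, ok := n.TCPEndpoint()
	if !ok {
		return nil, errors.New("node has no TCP endpoint")
	}
	return net.Dial("tcp", tcpEndpoint.String())
}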

@ -0,0 +1,325 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/urfave/cli/v2"
)
var app = flags.NewApp("go-ethereum era tool")
var (
dirFlag = &cli.StringFlag{
Name: "dir",
Usage: "directory storing all relevant era1 files",
Value: "eras",
}
networkFlag = &cli.StringFlag{
Name: "network",
Usage: "network name associated with era1 files",
Value: "mainnet",
}
eraSizeFlag = &cli.IntFlag{
Name: "size",
Usage: "number of blocks per era",
Value: era.MaxEra1Size,
}
txsFlag = &cli.BoolFlag{
Name: "txs",
Usage: "print full transaction values",
}
)
var (
blockCommand = &cli.Command{
Name: "block",
Usage: "get block data",
ArgsUsage: "<number>",
Action: block,
Flags: []cli.Flag{
txsFlag,
},
}
infoCommand = &cli.Command{
Name: "info",
ArgsUsage: "<epoch>",
Usage: "get epoch information",
Action: info,
}
verifyCommand = &cli.Command{
Name: "verify",
ArgsUsage: "<expected>",
Usage: "verifies each era1 against expected accumulator root",
Action: verify,
}
)
func init() {
app.Commands = []*cli.Command{
blockCommand,
infoCommand,
verifyCommand,
}
app.Flags = []cli.Flag{
dirFlag,
networkFlag,
eraSizeFlag,
}
}
func main() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
// block prints the specified block from an era1 store.
func block(ctx *cli.Context) error {
num, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid block number: %w", err)
}
e, err := open(ctx, num/uint64(ctx.Int(eraSizeFlag.Name)))
if err != nil {
return fmt.Errorf("error opening era1: %w", err)
}
defer e.Close()
// Read block with number.
block, err := e.GetBlockByNumber(num)
if err != nil {
return fmt.Errorf("error reading block %d: %w", num, err)
}
// Convert block to JSON and print.
val := ethapi.RPCMarshalBlock(block, ctx.Bool(txsFlag.Name), ctx.Bool(txsFlag.Name), params.MainnetChainConfig)
b, err := json.MarshalIndent(val, "", " ")
if err != nil {
return fmt.Errorf("error marshaling json: %w", err)
}
fmt.Println(string(b))
return nil
}
// info prints some high-level information about the era1 file.
func info(ctx *cli.Context) error {
epoch, err := strconv.ParseUint(ctx.Args().First(), 10, 64)
if err != nil {
return fmt.Errorf("invalid epoch number: %w", err)
}
e, err := open(ctx, epoch)
if err != nil {
return err
}
defer e.Close()
acc, err := e.Accumulator()
if err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
td, err := e.InitialTD()
if err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
info := struct {
Accumulator common.Hash `json:"accumulator"`
TotalDifficulty *big.Int `json:"totalDifficulty"`
StartBlock uint64 `json:"startBlock"`
Count uint64 `json:"count"`
}{
acc, td, e.Start(), e.Count(),
}
b, _ := json.MarshalIndent(info, "", " ")
fmt.Println(string(b))
return nil
}
// open opens an era1 file at a certain epoch.
func open(ctx *cli.Context, epoch uint64) (*era.Era, error) {
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
)
entries, err := era.ReadDir(dir, network)
if err != nil {
return nil, fmt.Errorf("error reading era dir: %w", err)
}
if epoch >= uint64(len(entries)) {
return nil, fmt.Errorf("epoch out-of-bounds: last %d, want %d", len(entries)-1, epoch)
}
return era.Open(filepath.Join(dir, entries[epoch]))
}
// verify checks each era1 file in a directory to ensure it is well-formed and
// that the accumulator matches the expected value.
func verify(ctx *cli.Context) error {
if ctx.Args().Len() != 1 {
return errors.New("missing accumulators file")
}
roots, err := readHashes(ctx.Args().First())
if err != nil {
return fmt.Errorf("unable to read expected roots file: %w", err)
}
var (
dir = ctx.String(dirFlag.Name)
network = ctx.String(networkFlag.Name)
start = time.Now()
reported = time.Now()
)
entries, err := era.ReadDir(dir, network)
if err != nil {
return fmt.Errorf("error reading %s: %w", dir, err)
}
if len(entries) != len(roots) {
return errors.New("number of era1 files should match the number of accumulator hashes")
}
// Verify each epoch matches the expected root.
for i, want := range roots {
// Wrap in function so defers don't stack.
err := func() error {
name := entries[i]
e, err := era.Open(filepath.Join(dir, name))
if err != nil {
return fmt.Errorf("error opening era1 file %s: %w", name, err)
}
defer e.Close()
// Read accumulator and check against expected.
if got, err := e.Accumulator(); err != nil {
return fmt.Errorf("error retrieving accumulator for %s: %w", name, err)
} else if got != want {
return fmt.Errorf("invalid root %s: got %s, want %s", name, got, want)
}
// Recompute accumulator.
if err := checkAccumulator(e); err != nil {
return fmt.Errorf("error verify era1 file %s: %w", name, err)
}
// Give the user some feedback that something is happening.
if time.Since(reported) >= 8*time.Second {
fmt.Printf("Verifying Era1 files \t\t verified=%d,\t elapsed=%s\n", i, common.PrettyDuration(time.Since(start)))
reported = time.Now()
}
return nil
}()
if err != nil {
return err
}
}
return nil
}
// checkAccumulator verifies the accumulator matches the data in the Era.
func checkAccumulator(e *era.Era) error {
var (
err error
want common.Hash
td *big.Int
tds = make([]*big.Int, 0)
hashes = make([]common.Hash, 0)
)
if want, err = e.Accumulator(); err != nil {
return fmt.Errorf("error reading accumulator: %w", err)
}
if td, err = e.InitialTD(); err != nil {
return fmt.Errorf("error reading total difficulty: %w", err)
}
it, err := era.NewIterator(e)
if err != nil {
return fmt.Errorf("error making era iterator: %w", err)
}
// To fully verify an era the following attributes must be checked:
// 1) the block index is constructed correctly
// 2) the tx root matches the value in the block
// 3) the receipts root matches the value in the block
// 4) the starting total difficulty value is correct
// 5) the accumulator is correct by recomputing it locally, which verifies
// the blocks are all correct (via hash)
//
// The attributes 1), 2), and 3) are checked for each block. 4) and 5) require
// accumulation across the entire set and are verified at the end.
for it.Next() {
// 1) next() walks the block index, so we're able to implicitly verify it.
if it.Error() != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), it.Error())
}
block, receipts, err := it.BlockAndReceipts()
if err != nil {
return fmt.Errorf("error reading block %d: %w", it.Number(), err)
}
// 2) recompute tx root and verify against header.
tr := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil))
if tr != block.TxHash() {
return fmt.Errorf("tx root in block %d mismatch: want %s, got %s", block.NumberU64(), block.TxHash(), tr)
}
// 3) recompute receipt root and check value against block.
rr := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if rr != block.ReceiptHash() {
return fmt.Errorf("receipt root in block %d mismatch: want %s, got %s", block.NumberU64(), block.ReceiptHash(), rr)
}
hashes = append(hashes, block.Hash())
td.Add(td, block.Difficulty())
tds = append(tds, new(big.Int).Set(td))
}
// 4+5) Verify accumulator and total difficulty.
got, err := era.ComputeAccumulator(hashes, tds)
if err != nil {
return fmt.Errorf("error computing accumulator: %w", err)
}
if got != want {
return fmt.Errorf("expected accumulator root does not match calculated: got %s, want %s", got, want)
}
return nil
}
// readHashes reads a file of newline-delimited hashes.
func readHashes(f string) ([]common.Hash, error) {
b, err := os.ReadFile(f)
if err != nil {
return nil, errors.New("unable to open accumulators file")
}
s := strings.Split(string(b), "\n")
// Remove empty last element, if present.
if s[len(s)-1] == "" {
s = s[:len(s)-1]
}
// Convert to hashes.
r := make([]common.Hash, len(s))
for i := range s {
r[i] = common.HexToHash(s[i])
}
return r, nil
}
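For programmatic use, the same era package calls exercised by the subcommands above can be chained directly. A minimal sketch (reusing this file's imports) that opens the first era1 file in a directory and reads its starting block; the directory and network names are whatever the caller passes in:

// firstBlock is an illustrative helper, not part of the tool itself.
func firstBlock(dir, network string) (*types.Block, error) {
	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return nil, fmt.Errorf("error reading era dir: %w", err)
	}
	if len(entries) == 0 {
		return nil, errors.New("no era1 files found")
	}
	e, err := era.Open(filepath.Join(dir, entries[0]))
	if err != nil {
		return nil, err
	}
	defer e.Close()
	return e.GetBlockByNumber(e.Start())
}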

@ -50,4 +50,4 @@ contains the password.
## JSON ## JSON
In case you need to output the result in a JSON format, you shall by using the `--json` flag. In case you need to output the result in a JSON format, you shall use the `--json` flag.

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/eth/tracers/logger" "github.com/ethereum/go-ethereum/eth/tracers/logger"
"github.com/ethereum/go-ethereum/tests" "github.com/ethereum/go-ethereum/tests"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
@ -51,7 +51,7 @@ func blockTestCmd(ctx *cli.Context) error {
return errors.New("path-to-test argument required") return errors.New("path-to-test argument required")
} }
var tracer vm.EVMLogger var tracer *tracing.Hooks
// Configure the EVM logger // Configure the EVM logger
if ctx.Bool(MachineFlag.Name) { if ctx.Bool(MachineFlag.Name) {
tracer = logger.NewJSONLogger(&logger.Config{ tracer = logger.NewJSONLogger(&logger.Config{

@ -160,7 +160,7 @@ func (i *bbInput) ToBlock() *types.Block {
if i.Header.Difficulty != nil { if i.Header.Difficulty != nil {
header.Difficulty = i.Header.Difficulty header.Difficulty = i.Header.Difficulty
} }
return types.NewBlockWithHeader(header).WithBody(i.Txs, i.Ommers).WithWithdrawals(i.Withdrawals) return types.NewBlockWithHeader(header).WithBody(types.Body{Transactions: i.Txs, Uncles: i.Ommers, Withdrawals: i.Withdrawals})
} }
// SealBlock seals the given block using the configured engine. // SealBlock seals the given block using the configured engine.
@ -242,7 +242,7 @@ func readInput(ctx *cli.Context) (*bbInput, error) {
if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector { if headerStr == stdinSelector || ommersStr == stdinSelector || txsStr == stdinSelector || cliqueStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin) decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil { if err := decoder.Decode(inputData); err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
} }
} }
if cliqueStr != stdinSelector && cliqueStr != "" { if cliqueStr != stdinSelector && cliqueStr != "" {

@ -17,7 +17,9 @@
package t8ntool package t8ntool
import ( import (
"encoding/json"
"fmt" "fmt"
"io"
"math/big" "math/big"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -28,20 +30,24 @@ import (
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/triedb"
"github.com/holiman/uint256"
"golang.org/x/crypto/sha3" "golang.org/x/crypto/sha3"
) )
type Prestate struct { type Prestate struct {
Env stEnv `json:"env"` Env stEnv `json:"env"`
Pre core.GenesisAlloc `json:"pre"` Pre types.GenesisAlloc `json:"pre"`
} }
// ExecutionResult contains the execution status after running a state test, any // ExecutionResult contains the execution status after running a state test, any
@ -117,7 +123,7 @@ type rejectedTx struct {
// Apply applies a set of transactions to a pre-state // Apply applies a set of transactions to a pre-state
func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
txIt txIterator, miningReward int64, txIt txIterator, miningReward int64,
getTracerFn func(txIndex int, txHash common.Hash) (vm.EVMLogger, error)) (*state.StateDB, *ExecutionResult, []byte, error) { getTracerFn func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error)) (*state.StateDB, *ExecutionResult, []byte, error) {
// Capture errors for BLOCKHASH operation, if we haven't been supplied the // Capture errors for BLOCKHASH operation, if we haven't been supplied the
// required blockhashes // required blockhashes
var hashError error var hashError error
@ -167,7 +173,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
// Calculate the BlobBaseFee // Calculate the BlobBaseFee
var excessBlobGas uint64 var excessBlobGas uint64
if pre.Env.ExcessBlobGas != nil { if pre.Env.ExcessBlobGas != nil {
excessBlobGas := *pre.Env.ExcessBlobGas excessBlobGas = *pre.Env.ExcessBlobGas
vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas) vmContext.BlobBaseFee = eip4844.CalcBlobFee(excessBlobGas)
} else { } else {
// If it is not explicitly defined, but we have the parent values, we try // If it is not explicitly defined, but we have the parent values, we try
@ -220,11 +226,13 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
continue continue
} }
} }
tracer, err := getTracerFn(txIndex, tx.Hash()) tracer, traceOutput, err := getTracerFn(txIndex, tx.Hash())
if err != nil { if err != nil {
return nil, nil, nil, err return nil, nil, nil, err
} }
vmConfig.Tracer = tracer if tracer != nil {
vmConfig.Tracer = tracer.Hooks
}
statedb.SetTxContext(tx.Hash(), txIndex) statedb.SetTxContext(tx.Hash(), txIndex)
var ( var (
@ -234,6 +242,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
) )
evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig)
if tracer != nil && tracer.OnTxStart != nil {
tracer.OnTxStart(evm.GetVMContext(), tx, msg.From)
}
// (ret []byte, usedGas uint64, failed bool, err error) // (ret []byte, usedGas uint64, failed bool, err error)
msgResult, err := core.ApplyMessage(evm, msg, gaspool) msgResult, err := core.ApplyMessage(evm, msg, gaspool)
if err != nil { if err != nil {
@ -241,6 +252,14 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err) log.Info("rejected tx", "index", i, "hash", tx.Hash(), "from", msg.From, "error", err)
rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()})
gaspool.SetGas(prevGas) gaspool.SetGas(prevGas)
if tracer != nil {
if tracer.OnTxEnd != nil {
tracer.OnTxEnd(nil, err)
}
if err := writeTraceResult(tracer, traceOutput); err != nil {
log.Warn("Error writing tracer output", "err", err)
}
}
continue continue
} }
includedTxs = append(includedTxs, tx) includedTxs = append(includedTxs, tx)
@ -283,6 +302,12 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
//receipt.BlockNumber //receipt.BlockNumber
receipt.TransactionIndex = uint(txIndex) receipt.TransactionIndex = uint(txIndex)
receipts = append(receipts, receipt) receipts = append(receipts, receipt)
if tracer != nil {
if tracer.Hooks.OnTxEnd != nil {
tracer.Hooks.OnTxEnd(receipt, nil)
}
writeTraceResult(tracer, traceOutput)
}
} }
txIndex++ txIndex++
@ -308,15 +333,15 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
reward.Sub(reward, new(big.Int).SetUint64(ommer.Delta)) reward.Sub(reward, new(big.Int).SetUint64(ommer.Delta))
reward.Mul(reward, blockReward) reward.Mul(reward, blockReward)
reward.Div(reward, big.NewInt(8)) reward.Div(reward, big.NewInt(8))
statedb.AddBalance(ommer.Address, reward) statedb.AddBalance(ommer.Address, uint256.MustFromBig(reward), tracing.BalanceIncreaseRewardMineUncle)
} }
statedb.AddBalance(pre.Env.Coinbase, minerReward) statedb.AddBalance(pre.Env.Coinbase, uint256.MustFromBig(minerReward), tracing.BalanceIncreaseRewardMineBlock)
} }
// Apply withdrawals // Apply withdrawals
for _, w := range pre.Env.Withdrawals { for _, w := range pre.Env.Withdrawals {
// Amount is in gwei, turn into wei // Amount is in gwei, turn into wei
amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei)) amount := new(big.Int).Mul(new(big.Int).SetUint64(w.Amount), big.NewInt(params.GWei))
statedb.AddBalance(w.Address, amount) statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
} }
// Commit block // Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber)) root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
@ -353,13 +378,13 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
return statedb, execRs, body, nil return statedb, execRs, body, nil
} }
func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB { func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB {
sdb := state.NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) sdb := state.NewDatabaseWithConfig(db, &triedb.Config{Preimages: true})
statedb, _ := state.New(types.EmptyRootHash, sdb, nil) statedb, _ := state.New(types.EmptyRootHash, sdb, nil)
for addr, a := range accounts { for addr, a := range accounts {
statedb.SetCode(addr, a.Code) statedb.SetCode(addr, a.Code)
statedb.SetNonce(addr, a.Nonce) statedb.SetNonce(addr, a.Nonce)
statedb.SetBalance(addr, a.Balance) statedb.SetBalance(addr, uint256.MustFromBig(a.Balance), tracing.BalanceIncreaseGenesisBalance)
for k, v := range a.Storage { for k, v := range a.Storage {
statedb.SetState(addr, k, v) statedb.SetState(addr, k, v)
} }
@ -396,3 +421,16 @@ func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime
} }
return ethash.CalcDifficulty(config, currentTime, parent) return ethash.CalcDifficulty(config, currentTime, parent)
} }
func writeTraceResult(tracer *tracers.Tracer, f io.WriteCloser) error {
defer f.Close()
result, err := tracer.GetResult()
if err != nil || result == nil {
return err
}
err = json.NewEncoder(f).Encode(result)
if err != nil {
return err
}
return nil
}

@ -50,6 +50,10 @@ var (
Name: "trace.returndata", Name: "trace.returndata",
Usage: "Enable return data output in traces", Usage: "Enable return data output in traces",
} }
TraceEnableCallFramesFlag = &cli.BoolFlag{
Name: "trace.callframes",
Usage: "Enable call frames output in traces",
}
OutputBasedir = &cli.StringFlag{ OutputBasedir = &cli.StringFlag{
Name: "output.basedir", Name: "output.basedir",
Usage: "Specifies where output files are placed. Will be created if it does not exist.", Usage: "Specifies where output files are placed. Will be created if it does not exist.",

@ -1,81 +0,0 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package t8ntool
import (
"encoding/json"
"io"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers"
"github.com/ethereum/go-ethereum/log"
)
// traceWriter is an vm.EVMLogger which also holds an inner logger/tracer.
// When the TxEnd event happens, the inner tracer result is written to the file, and
// the file is closed.
type traceWriter struct {
inner vm.EVMLogger
f io.WriteCloser
}
// Compile-time interface check
var _ = vm.EVMLogger((*traceWriter)(nil))
func (t *traceWriter) CaptureTxEnd(restGas uint64) {
t.inner.CaptureTxEnd(restGas)
defer t.f.Close()
if tracer, ok := t.inner.(tracers.Tracer); ok {
result, err := tracer.GetResult()
if err != nil {
log.Warn("Error in tracer", "err", err)
return
}
err = json.NewEncoder(t.f).Encode(result)
if err != nil {
log.Warn("Error writing tracer output", "err", err)
return
}
}
}
func (t *traceWriter) CaptureTxStart(gasLimit uint64) { t.inner.CaptureTxStart(gasLimit) }
func (t *traceWriter) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {
t.inner.CaptureStart(env, from, to, create, input, gas, value)
}
func (t *traceWriter) CaptureEnd(output []byte, gasUsed uint64, err error) {
t.inner.CaptureEnd(output, gasUsed, err)
}
func (t *traceWriter) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) {
t.inner.CaptureEnter(typ, from, to, input, gas, value)
}
func (t *traceWriter) CaptureExit(output []byte, gasUsed uint64, err error) {
t.inner.CaptureExit(output, gasUsed, err)
}
func (t *traceWriter) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
t.inner.CaptureState(pc, op, gas, cost, scope, rData, depth, err)
}
func (t *traceWriter) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
t.inner.CaptureFault(pc, op, gas, cost, scope, depth, err)
}

@ -86,7 +86,7 @@ func Transaction(ctx *cli.Context) error {
if txStr == stdinSelector { if txStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin) decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil { if err := decoder.Decode(inputData); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
} }
// Decode the body of already signed transactions // Decode the body of already signed transactions
body = common.FromHex(inputData.TxRlp) body = common.FromHex(inputData.TxRlp)

@ -20,15 +20,16 @@ import (
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
"io"
"math/big" "math/big"
"os" "os"
"path" "path/filepath"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/tracing"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/eth/tracers"
@ -74,14 +75,14 @@ var (
) )
type input struct { type input struct {
Alloc core.GenesisAlloc `json:"alloc,omitempty"` Alloc types.GenesisAlloc `json:"alloc,omitempty"`
Env *stEnv `json:"env,omitempty"` Env *stEnv `json:"env,omitempty"`
Txs []*txWithKey `json:"txs,omitempty"` Txs []*txWithKey `json:"txs,omitempty"`
TxRlp string `json:"txsRlp,omitempty"` TxRlp string `json:"txsRlp,omitempty"`
} }
func Transition(ctx *cli.Context) error { func Transition(ctx *cli.Context) error {
var getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { return nil, nil } var getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) { return nil, nil, nil }
baseDir, err := createBasedir(ctx) baseDir, err := createBasedir(ctx)
if err != nil { if err != nil {
@ -96,28 +97,40 @@ func Transition(ctx *cli.Context) error {
EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name), EnableReturnData: ctx.Bool(TraceEnableReturnDataFlag.Name),
Debug: true, Debug: true,
} }
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String()))) traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.jsonl", txIndex, txHash.String())))
if err != nil { if err != nil {
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
} }
return &traceWriter{logger.NewJSONLogger(logConfig, traceFile), traceFile}, nil var l *tracing.Hooks
if ctx.Bool(TraceEnableCallFramesFlag.Name) {
l = logger.NewJSONLoggerWithCallFrames(logConfig, traceFile)
} else {
l = logger.NewJSONLogger(logConfig, traceFile)
}
tracer := &tracers.Tracer{
Hooks: l,
// jsonLogger streams out result to file.
GetResult: func() (json.RawMessage, error) { return nil, nil },
Stop: func(err error) {},
}
return tracer, traceFile, nil
} }
} else if ctx.IsSet(TraceTracerFlag.Name) { } else if ctx.IsSet(TraceTracerFlag.Name) {
var config json.RawMessage var config json.RawMessage
if ctx.IsSet(TraceTracerConfigFlag.Name) { if ctx.IsSet(TraceTracerConfigFlag.Name) {
config = []byte(ctx.String(TraceTracerConfigFlag.Name)) config = []byte(ctx.String(TraceTracerConfigFlag.Name))
} }
getTracer = func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { getTracer = func(txIndex int, txHash common.Hash) (*tracers.Tracer, io.WriteCloser, error) {
traceFile, err := os.Create(path.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String()))) traceFile, err := os.Create(filepath.Join(baseDir, fmt.Sprintf("trace-%d-%v.json", txIndex, txHash.String())))
if err != nil { if err != nil {
return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err)) return nil, nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err))
} }
tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config) tracer, err := tracers.DefaultDirectory.New(ctx.String(TraceTracerFlag.Name), nil, config)
if err != nil { if err != nil {
return nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err)) return nil, nil, NewError(ErrorConfig, fmt.Errorf("failed instantiating tracer: %w", err))
} }
return &traceWriter{tracer, traceFile}, nil return tracer, traceFile, nil
} }
} }
// We need to load three things: alloc, env and transactions. May be either in // We need to load three things: alloc, env and transactions. May be either in
@ -136,7 +149,7 @@ func Transition(ctx *cli.Context) error {
if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector {
decoder := json.NewDecoder(os.Stdin) decoder := json.NewDecoder(os.Stdin)
if err := decoder.Decode(inputData); err != nil { if err := decoder.Decode(inputData); err != nil {
return NewError(ErrorJson, fmt.Errorf("failed unmarshaling stdin: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed unmarshalling stdin: %v", err))
} }
} }
if allocStr != stdinSelector { if allocStr != stdinSelector {
@ -168,7 +181,7 @@ func Transition(ctx *cli.Context) error {
// Set the chain id // Set the chain id
chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name))
if txIt, err = loadTransactions(txStr, inputData, prestate.Env, chainConfig); err != nil { if txIt, err = loadTransactions(txStr, inputData, chainConfig); err != nil {
return err return err
} }
if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil { if err := applyLondonChecks(&prestate.Env, chainConfig); err != nil {
@ -204,7 +217,7 @@ func applyLondonChecks(env *stEnv, chainConfig *params.ChainConfig) error {
return nil return nil
} }
if env.ParentBaseFee == nil || env.Number == 0 { if env.ParentBaseFee == nil || env.Number == 0 {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section")) return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'parentBaseFee' in env section"))
} }
env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{ env.BaseFee = eip1559.CalcBaseFee(chainConfig, &types.Header{
Number: new(big.Int).SetUint64(env.Number - 1), Number: new(big.Int).SetUint64(env.Number - 1),
@ -272,7 +285,7 @@ func applyCancunChecks(env *stEnv, chainConfig *params.ChainConfig) error {
return nil return nil
} }
type Alloc map[common.Address]core.GenesisAccount type Alloc map[common.Address]types.Account
func (g Alloc) OnRoot(common.Hash) {} func (g Alloc) OnRoot(common.Hash) {}
@ -280,15 +293,15 @@ func (g Alloc) OnAccount(addr *common.Address, dumpAccount state.DumpAccount) {
if addr == nil { if addr == nil {
return return
} }
balance, _ := new(big.Int).SetString(dumpAccount.Balance, 10) balance, _ := new(big.Int).SetString(dumpAccount.Balance, 0)
var storage map[common.Hash]common.Hash var storage map[common.Hash]common.Hash
if dumpAccount.Storage != nil { if dumpAccount.Storage != nil {
storage = make(map[common.Hash]common.Hash) storage = make(map[common.Hash]common.Hash, len(dumpAccount.Storage))
for k, v := range dumpAccount.Storage { for k, v := range dumpAccount.Storage {
storage[k] = common.HexToHash(v) storage[k] = common.HexToHash(v)
} }
} }
genesisAccount := core.GenesisAccount{ genesisAccount := types.Account{
Code: dumpAccount.Code, Code: dumpAccount.Code,
Storage: storage, Storage: storage,
Balance: balance, Balance: balance,
@ -303,7 +316,7 @@ func saveFile(baseDir, filename string, data interface{}) error {
if err != nil { if err != nil {
return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err))
} }
location := path.Join(baseDir, filename) location := filepath.Join(baseDir, filename)
if err = os.WriteFile(location, b, 0644); err != nil { if err = os.WriteFile(location, b, 0644); err != nil {
return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err))
} }

@ -112,7 +112,7 @@ func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Tran
return signedTxs, nil return signedTxs, nil
} }
func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *params.ChainConfig) (txIterator, error) { func loadTransactions(txStr string, inputData *input, chainConfig *params.ChainConfig) (txIterator, error) {
var txsWithKeys []*txWithKey var txsWithKeys []*txWithKey
if txStr != stdinSelector { if txStr != stdinSelector {
data, err := os.ReadFile(txStr) data, err := os.ReadFile(txStr)
@ -127,7 +127,7 @@ func loadTransactions(txStr string, inputData *input, env stEnv, chainConfig *pa
return newRlpTxIterator(body), nil return newRlpTxIterator(body), nil
} }
if err := json.Unmarshal(data, &txsWithKeys); err != nil { if err := json.Unmarshal(data, &txsWithKeys); err != nil {
return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshaling txs-file: %v", err)) return nil, NewError(ErrorJson, fmt.Errorf("failed unmarshalling txs-file: %v", err))
} }
} else { } else {
if len(inputData.TxRlp) > 0 { if len(inputData.TxRlp) > 0 {
