diff --git a/beacon/blsync/block_sync.go b/beacon/blsync/block_sync.go
new file mode 100755
index 0000000000..91b21163e6
--- /dev/null
+++ b/beacon/blsync/block_sync.go
@@ -0,0 +1,203 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blsync
+
+import (
+ "fmt"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/beacon/engine"
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/light/sync"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/lru"
+ ctypes "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/trie"
+ "github.com/holiman/uint256"
+ "github.com/protolambda/zrnt/eth2/beacon/capella"
+ "github.com/protolambda/zrnt/eth2/configs"
+ "github.com/protolambda/ztyp/tree"
+)
+
+// beaconBlockSync implements request.Module; it fetches the beacon blocks belonging
+// to the validated and prefetch heads.
+type beaconBlockSync struct {
+ recentBlocks *lru.Cache[common.Hash, *capella.BeaconBlock]
+ locked map[common.Hash]request.ServerAndID
+ serverHeads map[request.Server]common.Hash
+ headTracker headTracker
+
+ lastHeadInfo types.HeadInfo
+ chainHeadFeed *event.Feed
+}
+
+type headTracker interface {
+ PrefetchHead() types.HeadInfo
+ ValidatedHead() (types.SignedHeader, bool)
+ ValidatedFinality() (types.FinalityUpdate, bool)
+}
+
+// newBeaconBlockSync returns a new beaconBlockSync.
+func newBeaconBlockSync(headTracker headTracker, chainHeadFeed *event.Feed) *beaconBlockSync {
+ return &beaconBlockSync{
+ headTracker: headTracker,
+ chainHeadFeed: chainHeadFeed,
+ recentBlocks: lru.NewCache[common.Hash, *capella.BeaconBlock](10),
+ locked: make(map[common.Hash]request.ServerAndID),
+ serverHeads: make(map[request.Server]common.Hash),
+ }
+}
+
+// Process implements request.Module.
+func (s *beaconBlockSync) Process(requester request.Requester, events []request.Event) {
+ for _, event := range events {
+ switch event.Type {
+ case request.EvResponse, request.EvFail, request.EvTimeout:
+ sid, req, resp := event.RequestInfo()
+ blockRoot := common.Hash(req.(sync.ReqBeaconBlock))
+ if resp != nil {
+ s.recentBlocks.Add(blockRoot, resp.(*capella.BeaconBlock))
+ }
+ if s.locked[blockRoot] == sid {
+ delete(s.locked, blockRoot)
+ }
+ case sync.EvNewHead:
+ s.serverHeads[event.Server] = event.Data.(types.HeadInfo).BlockRoot
+ case request.EvUnregistered:
+ delete(s.serverHeads, event.Server)
+ }
+ }
+ s.updateEventFeed()
+ // request validated head block if unavailable and not yet requested
+ if vh, ok := s.headTracker.ValidatedHead(); ok {
+ s.tryRequestBlock(requester, vh.Header.Hash(), false)
+ }
+ // request the prefetch head if it has been announced by one of the available servers
+ if prefetchHead := s.headTracker.PrefetchHead().BlockRoot; prefetchHead != (common.Hash{}) {
+ s.tryRequestBlock(requester, prefetchHead, true)
+ }
+}
+
+func (s *beaconBlockSync) tryRequestBlock(requester request.Requester, blockRoot common.Hash, needSameHead bool) {
+ if _, ok := s.recentBlocks.Get(blockRoot); ok {
+ return
+ }
+ if _, ok := s.locked[blockRoot]; ok {
+ return
+ }
+ for _, server := range requester.CanSendTo() {
+ if needSameHead && (s.serverHeads[server] != blockRoot) {
+ continue
+ }
+ id := requester.Send(server, sync.ReqBeaconBlock(blockRoot))
+ s.locked[blockRoot] = request.ServerAndID{Server: server, ID: id}
+ return
+ }
+}
+
+func blockHeadInfo(block *capella.BeaconBlock) types.HeadInfo {
+ if block == nil {
+ return types.HeadInfo{}
+ }
+ return types.HeadInfo{Slot: uint64(block.Slot), BlockRoot: beaconBlockHash(block)}
+}
+
+// beaconBlockHash calculates the hash of a beacon block.
+func beaconBlockHash(beaconBlock *capella.BeaconBlock) common.Hash {
+ return common.Hash(beaconBlock.HashTreeRoot(configs.Mainnet, tree.GetHashFn()))
+}
+
+// getExecBlock extracts the execution block from the beacon block's payload.
+func getExecBlock(beaconBlock *capella.BeaconBlock) (*ctypes.Block, error) {
+ payload := &beaconBlock.Body.ExecutionPayload
+ txs := make([]*ctypes.Transaction, len(payload.Transactions))
+ for i, opaqueTx := range payload.Transactions {
+ var tx ctypes.Transaction
+ if err := tx.UnmarshalBinary(opaqueTx); err != nil {
+ return nil, fmt.Errorf("failed to parse tx %d: %v", i, err)
+ }
+ txs[i] = &tx
+ }
+ withdrawals := make([]*ctypes.Withdrawal, len(payload.Withdrawals))
+ for i, w := range payload.Withdrawals {
+ withdrawals[i] = &ctypes.Withdrawal{
+ Index: uint64(w.Index),
+ Validator: uint64(w.ValidatorIndex),
+ Address: common.Address(w.Address),
+ Amount: uint64(w.Amount),
+ }
+ }
+ wroot := ctypes.DeriveSha(ctypes.Withdrawals(withdrawals), trie.NewStackTrie(nil))
+ execHeader := &ctypes.Header{
+ ParentHash: common.Hash(payload.ParentHash),
+ UncleHash: ctypes.EmptyUncleHash,
+ Coinbase: common.Address(payload.FeeRecipient),
+ Root: common.Hash(payload.StateRoot),
+ TxHash: ctypes.DeriveSha(ctypes.Transactions(txs), trie.NewStackTrie(nil)),
+ ReceiptHash: common.Hash(payload.ReceiptsRoot),
+ Bloom: ctypes.Bloom(payload.LogsBloom),
+ Difficulty: common.Big0,
+ Number: new(big.Int).SetUint64(uint64(payload.BlockNumber)),
+ GasLimit: uint64(payload.GasLimit),
+ GasUsed: uint64(payload.GasUsed),
+ Time: uint64(payload.Timestamp),
+ Extra: []byte(payload.ExtraData),
+ MixDigest: common.Hash(payload.PrevRandao), // reused in merge
+ Nonce: ctypes.BlockNonce{}, // zero
+ BaseFee: (*uint256.Int)(&payload.BaseFeePerGas).ToBig(),
+ WithdrawalsHash: &wroot,
+ }
+ execBlock := ctypes.NewBlockWithHeader(execHeader).WithBody(txs, nil).WithWithdrawals(withdrawals)
+ if execBlockHash := execBlock.Hash(); execBlockHash != common.Hash(payload.BlockHash) {
+ return execBlock, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", common.Hash(payload.BlockHash), execBlockHash)
+ }
+ return execBlock, nil
+}
+
+func (s *beaconBlockSync) updateEventFeed() {
+ head, ok := s.headTracker.ValidatedHead()
+ if !ok {
+ return
+ }
+ finality, ok := s.headTracker.ValidatedFinality() // TODO: fetch directly if subscription does not deliver
+ if !ok || head.Header.Epoch() != finality.Attested.Header.Epoch() {
+ return
+ }
+ validatedHead := head.Header.Hash()
+ headBlock, ok := s.recentBlocks.Get(validatedHead)
+ if !ok {
+ return
+ }
+ headInfo := blockHeadInfo(headBlock)
+ if headInfo == s.lastHeadInfo {
+ return
+ }
+ s.lastHeadInfo = headInfo
+ // new head block and finality info available; extract executable data and send event to feed
+ execBlock, err := getExecBlock(headBlock)
+ if err != nil {
+ log.Error("Error extracting execution block from validated beacon block", "error", err)
+ return
+ }
+ s.chainHeadFeed.Send(types.ChainHeadEvent{
+ HeadBlock: engine.BlockToExecutableData(execBlock, nil, nil).ExecutionPayload,
+ Finalized: common.Hash(finality.Finalized.PayloadHeader.BlockHash),
+ })
+}
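For context, a hedged sketch of how the events emitted by updateEventFeed above might be consumed. The feed wiring mirrors the test file and client.go; the consumeHeads helper and its log output are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/event"
)

// consumeHeads drains ChainHeadEvents from the feed that beaconBlockSync
// publishes to. HeadBlock carries the execution payload extracted by
// getExecBlock; Finalized is the finalized execution block hash.
func consumeHeads(feed *event.Feed) {
	headCh := make(chan types.ChainHeadEvent, 16)
	sub := feed.Subscribe(headCh)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-headCh:
			fmt.Println("new head:", ev.HeadBlock.Number, "finalized:", ev.Finalized)
		case err := <-sub.Err():
			fmt.Println("subscription error:", err)
			return
		}
	}
}

func main() {
	feed := new(event.Feed)
	go consumeHeads(feed)
	// In the real pipeline this feed is handed to newBeaconBlockSync, whose
	// updateEventFeed sends ChainHeadEvents into it as heads are validated.
}
```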
diff --git a/beacon/blsync/block_sync_test.go b/beacon/blsync/block_sync_test.go
new file mode 100644
index 0000000000..9ce434d862
--- /dev/null
+++ b/beacon/blsync/block_sync_test.go
@@ -0,0 +1,160 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blsync
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/light/sync"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/protolambda/zrnt/eth2/beacon/capella"
+ "github.com/protolambda/zrnt/eth2/configs"
+ "github.com/protolambda/ztyp/tree"
+)
+
+var (
+ testServer1 = "testServer1"
+ testServer2 = "testServer2"
+
+ testBlock1 = &capella.BeaconBlock{
+ Slot: 123,
+ Body: capella.BeaconBlockBody{
+ ExecutionPayload: capella.ExecutionPayload{BlockNumber: 456},
+ },
+ }
+ testBlock2 = &capella.BeaconBlock{
+ Slot: 124,
+ Body: capella.BeaconBlockBody{
+ ExecutionPayload: capella.ExecutionPayload{BlockNumber: 457},
+ },
+ }
+)
+
+func init() {
+ eb1, _ := getExecBlock(testBlock1)
+ testBlock1.Body.ExecutionPayload.BlockHash = tree.Root(eb1.Hash())
+ eb2, _ := getExecBlock(testBlock2)
+ testBlock2.Body.ExecutionPayload.BlockHash = tree.Root(eb2.Hash())
+}
+
+func TestBlockSync(t *testing.T) {
+ ht := &testHeadTracker{}
+ eventFeed := new(event.Feed)
+ blockSync := newBeaconBlockSync(ht, eventFeed)
+ headCh := make(chan types.ChainHeadEvent, 16)
+ eventFeed.Subscribe(headCh)
+ ts := sync.NewTestScheduler(t, blockSync)
+ ts.AddServer(testServer1, 1)
+ ts.AddServer(testServer2, 1)
+
+ expHeadBlock := func(tci int, expHead *capella.BeaconBlock) {
+ var expNumber, headNumber uint64
+ if expHead != nil {
+ expNumber = uint64(expHead.Body.ExecutionPayload.BlockNumber)
+ }
+ select {
+ case event := <-headCh:
+ headNumber = event.HeadBlock.Number
+ default:
+ }
+ if headNumber != expNumber {
+ t.Errorf("Wrong head block in test case #%d (expected block number %d, got %d)", tci, expNumber, headNumber)
+ }
+ }
+
+ // no block requests expected until head tracker knows about a head
+ ts.Run(1)
+ expHeadBlock(1, nil)
+
+ // set block 1 as prefetch head, announced by server 2
+ head1 := blockHeadInfo(testBlock1)
+ ht.prefetch = head1
+ ts.ServerEvent(sync.EvNewHead, testServer2, head1)
+ // expect request to server 2 which has announced the head
+ ts.Run(2, testServer2, sync.ReqBeaconBlock(head1.BlockRoot))
+
+ // valid response
+ ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testBlock1)
+ ts.AddAllowance(testServer2, 1)
+ ts.Run(3)
+ // head block still not expected as the fetched block is not the validated head yet
+ expHeadBlock(3, nil)
+
+ // set as validated head, expect no further requests but block 1 set as head block
+ ht.validated.Header = blockHeader(testBlock1)
+ ts.Run(4)
+ expHeadBlock(4, testBlock1)
+
+ // set block 2 as prefetch head, announced by server 1
+ head2 := blockHeadInfo(testBlock2)
+ ht.prefetch = head2
+ ts.ServerEvent(sync.EvNewHead, testServer1, head2)
+ // expect request to server 1
+ ts.Run(5, testServer1, sync.ReqBeaconBlock(head2.BlockRoot))
+
+ // req2 fails, no further requests expected because server 2 has not announced it
+ ts.RequestEvent(request.EvFail, ts.Request(5, 1), nil)
+ ts.Run(6)
+
+ // set as validated head before retrieving block; now it's assumed to be available from server 2 too
+ ht.validated.Header = blockHeader(testBlock2)
+ // expect req2 retry to server 2
+ ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot))
+ // now head block should be unavailable again
+ expHeadBlock(4, nil)
+
+ // valid response, now head block should be block 2 immediately as it is already validated
+ ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2)
+ ts.Run(8)
+ expHeadBlock(5, testBlock2)
+}
+
+func blockHeader(block *capella.BeaconBlock) types.Header {
+ return types.Header{
+ Slot: uint64(block.Slot),
+ ProposerIndex: uint64(block.ProposerIndex),
+ ParentRoot: common.Hash(block.ParentRoot),
+ StateRoot: common.Hash(block.StateRoot),
+ BodyRoot: common.Hash(block.Body.HashTreeRoot(configs.Mainnet, tree.GetHashFn())),
+ }
+}
+
+type testHeadTracker struct {
+ prefetch types.HeadInfo
+ validated types.SignedHeader
+}
+
+func (h *testHeadTracker) PrefetchHead() types.HeadInfo {
+ return h.prefetch
+}
+
+func (h *testHeadTracker) ValidatedHead() (types.SignedHeader, bool) {
+ return h.validated, h.validated.Header != (types.Header{})
+}
+
+// TODO add test case for finality
+func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
+ return types.FinalityUpdate{
+ Attested: types.HeaderWithExecProof{Header: h.validated.Header},
+ Finalized: types.HeaderWithExecProof{PayloadHeader: &capella.ExecutionPayloadHeader{}},
+ Signature: h.validated.Signature,
+ SignatureSlot: h.validated.SignatureSlot,
+ }, h.validated.Header != (types.Header{})
+}
diff --git a/beacon/blsync/client.go b/beacon/blsync/client.go
new file mode 100644
index 0000000000..1bfbb13160
--- /dev/null
+++ b/beacon/blsync/client.go
@@ -0,0 +1,103 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blsync
+
+import (
+ "strings"
+
+ "github.com/ethereum/go-ethereum/beacon/light"
+ "github.com/ethereum/go-ethereum/beacon/light/api"
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/light/sync"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/urfave/cli/v2"
+)
+
+type Client struct {
+ scheduler *request.Scheduler
+ chainHeadFeed *event.Feed
+ urls []string
+ customHeader map[string]string
+}
+
+func NewClient(ctx *cli.Context) *Client {
+ if !ctx.IsSet(utils.BeaconApiFlag.Name) {
+ utils.Fatalf("Beacon node light client API URL not specified")
+ }
+ var (
+ chainConfig = makeChainConfig(ctx)
+ customHeader = make(map[string]string)
+ )
+ for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) {
+ kv := strings.Split(s, ":")
+ if len(kv) != 2 {
+ utils.Fatalf("Invalid custom API header entry: %s", s)
+ }
+ customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
+ }
+ // create data structures
+ var (
+ db = memorydb.New()
+ threshold = ctx.Int(utils.BeaconThresholdFlag.Name)
+ committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name))
+ headTracker = light.NewHeadTracker(committeeChain, threshold)
+ )
+ headSync := sync.NewHeadSync(headTracker, committeeChain)
+
+ // set up scheduler and sync modules
+ chainHeadFeed := new(event.Feed)
+ scheduler := request.NewScheduler()
+ checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint)
+ forwardSync := sync.NewForwardUpdateSync(committeeChain)
+ beaconBlockSync := newBeaconBlockSync(headTracker, chainHeadFeed)
+ scheduler.RegisterTarget(headTracker)
+ scheduler.RegisterTarget(committeeChain)
+ scheduler.RegisterModule(checkpointInit, "checkpointInit")
+ scheduler.RegisterModule(forwardSync, "forwardSync")
+ scheduler.RegisterModule(headSync, "headSync")
+ scheduler.RegisterModule(beaconBlockSync, "beaconBlockSync")
+
+ return &Client{
+ scheduler: scheduler,
+ urls: ctx.StringSlice(utils.BeaconApiFlag.Name),
+ customHeader: customHeader,
+ chainHeadFeed: chainHeadFeed,
+ }
+}
+
+// SubscribeChainHeadEvent allows callers to subscribe a provided channel to new
+// head updates.
+func (c *Client) SubscribeChainHeadEvent(ch chan<- types.ChainHeadEvent) event.Subscription {
+ return c.chainHeadFeed.Subscribe(ch)
+}
+
+func (c *Client) Start() {
+ c.scheduler.Start()
+ // register server(s)
+ for _, url := range c.urls {
+ beaconApi := api.NewBeaconLightApi(url, c.customHeader)
+ c.scheduler.RegisterServer(request.NewServer(api.NewApiServer(beaconApi), &mclock.System{}))
+ }
+}
+
+func (c *Client) Stop() {
+ c.scheduler.Stop()
+}
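A hedged sketch of how this Client might be driven from a urfave/cli command, based only on the exported NewClient, Start, Stop and SubscribeChainHeadEvent API above; the flag set and logging below are illustrative, not the actual command wiring.

```go
package main

import (
	"log"
	"os"

	"github.com/ethereum/go-ethereum/beacon/blsync"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		// Illustrative subset of flags; NewClient requires --beacon.api to be set.
		Flags: []cli.Flag{utils.BeaconApiFlag, utils.MainnetFlag},
		Action: func(ctx *cli.Context) error {
			client := blsync.NewClient(ctx)
			headCh := make(chan types.ChainHeadEvent, 16)
			sub := client.SubscribeChainHeadEvent(headCh)
			defer sub.Unsubscribe()

			client.Start()
			defer client.Stop()

			for ev := range headCh {
				log.Println("validated head", ev.HeadBlock.Number)
			}
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```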
diff --git a/beacon/blsync/config.go b/beacon/blsync/config.go
new file mode 100644
index 0000000000..b51d3e2b05
--- /dev/null
+++ b/beacon/blsync/config.go
@@ -0,0 +1,113 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package blsync
+
+import (
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/urfave/cli/v2"
+)
+
+// lightClientConfig contains beacon light client configuration
+type lightClientConfig struct {
+ *types.ChainConfig
+ Checkpoint common.Hash
+}
+
+var (
+ MainnetConfig = lightClientConfig{
+ ChainConfig: (&types.ChainConfig{
+ GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
+ GenesisTime: 1606824023,
+ }).
+ AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
+ AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
+ AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
+ AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}),
+ Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
+ }
+
+ SepoliaConfig = lightClientConfig{
+ ChainConfig: (&types.ChainConfig{
+ GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
+ GenesisTime: 1655733600,
+ }).
+ AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
+ AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
+ AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
+ AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}),
+ Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
+ }
+
+ GoerliConfig = lightClientConfig{
+ ChainConfig: (&types.ChainConfig{
+ GenesisValidatorsRoot: common.HexToHash("0x043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"),
+ GenesisTime: 1614588812,
+ }).
+ AddFork("GENESIS", 0, []byte{0, 0, 16, 32}).
+ AddFork("ALTAIR", 36660, []byte{1, 0, 16, 32}).
+ AddFork("BELLATRIX", 112260, []byte{2, 0, 16, 32}).
+ AddFork("CAPELLA", 162304, []byte{3, 0, 16, 32}),
+ Checkpoint: common.HexToHash("0x53a0f4f0a378e2c4ae0a9ee97407eb69d0d737d8d8cd0a5fb1093f42f7b81c49"),
+ }
+)
+
+func makeChainConfig(ctx *cli.Context) lightClientConfig {
+ utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag)
+ customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name) || ctx.IsSet(utils.BeaconGenesisRootFlag.Name) || ctx.IsSet(utils.BeaconGenesisTimeFlag.Name)
+ var config lightClientConfig
+ switch {
+ case ctx.Bool(utils.MainnetFlag.Name):
+ config = MainnetConfig
+ case ctx.Bool(utils.SepoliaFlag.Name):
+ config = SepoliaConfig
+ case ctx.Bool(utils.GoerliFlag.Name):
+ config = GoerliConfig
+ default:
+ if !customConfig {
+ config = MainnetConfig
+ }
+ }
+ if customConfig && config.Forks != nil {
+ utils.Fatalf("Cannot use custom beacon chain config flags in combination with pre-defined network config")
+ }
+ if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) {
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.GenesisValidatorsRoot[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err)
+ }
+ }
+ if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) {
+ config.GenesisTime = ctx.Uint64(utils.BeaconGenesisTimeFlag.Name)
+ }
+ if ctx.IsSet(utils.BeaconConfigFlag.Name) {
+ if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil {
+ utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err)
+ }
+ }
+ if ctx.IsSet(utils.BeaconCheckpointFlag.Name) {
+ if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 {
+ copy(config.Checkpoint[:len(c)], c)
+ } else {
+ utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err)
+ }
+ }
+ return config
+}
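For illustration, a custom chain config could be assembled the same way as the predefined ones above. The genesis values and fork epochs/versions below are placeholders for a hypothetical network, not real parameters.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Placeholder genesis data and fork schedule for a hypothetical network.
	config := (&types.ChainConfig{
		GenesisValidatorsRoot: common.HexToHash("0x01"),
		GenesisTime:           1700000000,
	}).
		AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
		AddFork("ALTAIR", 10, []byte{1, 0, 0, 0}).
		AddFork("BELLATRIX", 20, []byte{2, 0, 0, 0}).
		AddFork("CAPELLA", 30, []byte{3, 0, 0, 0})
	fmt.Println("custom config has forks:", config.Forks != nil)
}
```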
diff --git a/beacon/light/api/api_server.go b/beacon/light/api/api_server.go
new file mode 100755
index 0000000000..da044f4b2d
--- /dev/null
+++ b/beacon/light/api/api_server.go
@@ -0,0 +1,103 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package api
+
+import (
+ "reflect"
+
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/light/sync"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// ApiServer is a wrapper around BeaconLightApi that implements request.requestServer.
+type ApiServer struct {
+ api *BeaconLightApi
+ eventCallback func(event request.Event)
+ unsubscribe func()
+}
+
+// NewApiServer creates a new ApiServer.
+func NewApiServer(api *BeaconLightApi) *ApiServer {
+ return &ApiServer{api: api}
+}
+
+// Subscribe implements request.requestServer.
+func (s *ApiServer) Subscribe(eventCallback func(event request.Event)) {
+ s.eventCallback = eventCallback
+ listener := HeadEventListener{
+ OnNewHead: func(slot uint64, blockRoot common.Hash) {
+ log.Debug("New head received", "slot", slot, "blockRoot", blockRoot)
+ eventCallback(request.Event{Type: sync.EvNewHead, Data: types.HeadInfo{Slot: slot, BlockRoot: blockRoot}})
+ },
+ OnSignedHead: func(head types.SignedHeader) {
+ log.Debug("New signed head received", "slot", head.Header.Slot, "blockRoot", head.Header.Hash(), "signerCount", head.Signature.SignerCount())
+ eventCallback(request.Event{Type: sync.EvNewSignedHead, Data: head})
+ },
+ OnFinality: func(head types.FinalityUpdate) {
+ log.Debug("New finality update received", "slot", head.Attested.Slot, "blockRoot", head.Attested.Hash(), "signerCount", head.Signature.SignerCount())
+ eventCallback(request.Event{Type: sync.EvNewFinalityUpdate, Data: head})
+ },
+ OnError: func(err error) {
+ log.Warn("Head event stream error", "err", err)
+ },
+ }
+ s.unsubscribe = s.api.StartHeadListener(listener)
+}
+
+// SendRequest implements request.requestServer.
+func (s *ApiServer) SendRequest(id request.ID, req request.Request) {
+ go func() {
+ var resp request.Response
+ var err error
+ switch data := req.(type) {
+ case sync.ReqUpdates:
+ log.Debug("Beacon API: requesting light client update", "reqid", id, "period", data.FirstPeriod, "count", data.Count)
+ var r sync.RespUpdates
+ r.Updates, r.Committees, err = s.api.GetBestUpdatesAndCommittees(data.FirstPeriod, data.Count)
+ resp = r
+ case sync.ReqHeader:
+ log.Debug("Beacon API: requesting header", "reqid", id, "hash", common.Hash(data))
+ resp, err = s.api.GetHeader(common.Hash(data))
+ case sync.ReqCheckpointData:
+ log.Debug("Beacon API: requesting checkpoint data", "reqid", id, "hash", common.Hash(data))
+ resp, err = s.api.GetCheckpointData(common.Hash(data))
+ case sync.ReqBeaconBlock:
+ log.Debug("Beacon API: requesting block", "reqid", id, "hash", common.Hash(data))
+ resp, err = s.api.GetBeaconBlock(common.Hash(data))
+ default:
+ }
+
+ if err != nil {
+ log.Warn("Beacon API request failed", "type", reflect.TypeOf(req), "reqid", id, "err", err)
+ s.eventCallback(request.Event{Type: request.EvFail, Data: request.RequestResponse{ID: id, Request: req}})
+ } else {
+ s.eventCallback(request.Event{Type: request.EvResponse, Data: request.RequestResponse{ID: id, Request: req, Response: resp}})
+ }
+ }()
+}
+
+// Unsubscribe implements request.requestServer.
+// Note: Unsubscribe should not be called concurrently with Subscribe.
+func (s *ApiServer) Unsubscribe() {
+ if s.unsubscribe != nil {
+ s.unsubscribe()
+ s.unsubscribe = nil
+ }
+}
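A sketch of how ApiServer is meant to be plugged into the request scheduler, mirroring the wiring in beacon/blsync/client.go above; the endpoint URL is a placeholder and no sync modules are registered here.

```go
package main

import (
	"github.com/ethereum/go-ethereum/beacon/light/api"
	"github.com/ethereum/go-ethereum/beacon/light/request"
	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	scheduler := request.NewScheduler()
	// Sync modules would be registered here, before Start.
	scheduler.Start()
	defer scheduler.Stop()

	// ApiServer adapts the REST client to the scheduler's server interface;
	// request.NewServer wraps it before registration, as in client.go.
	beaconApi := api.NewBeaconLightApi("http://127.0.0.1:9596", nil)
	scheduler.RegisterServer(request.NewServer(api.NewApiServer(beaconApi), &mclock.System{}))
}
```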
diff --git a/beacon/light/api/light_api.go b/beacon/light/api/light_api.go
new file mode 100755
index 0000000000..fd701dc0a8
--- /dev/null
+++ b/beacon/light/api/light_api.go
@@ -0,0 +1,496 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package api
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/donovanhide/eventsource"
+ "github.com/ethereum/go-ethereum/beacon/merkle"
+ "github.com/ethereum/go-ethereum/beacon/params"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/protolambda/zrnt/eth2/beacon/capella"
+ "github.com/protolambda/zrnt/eth2/configs"
+ "github.com/protolambda/ztyp/tree"
+)
+
+var (
+ ErrNotFound = errors.New("404 Not Found")
+ ErrInternal = errors.New("500 Internal Server Error")
+)
+
+type CommitteeUpdate struct {
+ Version string
+ Update types.LightClientUpdate
+ NextSyncCommittee types.SerializedSyncCommittee
+}
+
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate
+type committeeUpdateJson struct {
+ Version string `json:"version"`
+ Data committeeUpdateData `json:"data"`
+}
+
+type committeeUpdateData struct {
+ Header jsonBeaconHeader `json:"attested_header"`
+ NextSyncCommittee types.SerializedSyncCommittee `json:"next_sync_committee"`
+ NextSyncCommitteeBranch merkle.Values `json:"next_sync_committee_branch"`
+ FinalizedHeader *jsonBeaconHeader `json:"finalized_header,omitempty"`
+ FinalityBranch merkle.Values `json:"finality_branch,omitempty"`
+ SyncAggregate types.SyncAggregate `json:"sync_aggregate"`
+ SignatureSlot common.Decimal `json:"signature_slot"`
+}
+
+type jsonBeaconHeader struct {
+ Beacon types.Header `json:"beacon"`
+}
+
+type jsonHeaderWithExecProof struct {
+ Beacon types.Header `json:"beacon"`
+ Execution *capella.ExecutionPayloadHeader `json:"execution"`
+ ExecutionBranch merkle.Values `json:"execution_branch"`
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (u *CommitteeUpdate) UnmarshalJSON(input []byte) error {
+ var dec committeeUpdateJson
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ u.Version = dec.Version
+ u.NextSyncCommittee = dec.Data.NextSyncCommittee
+ u.Update = types.LightClientUpdate{
+ AttestedHeader: types.SignedHeader{
+ Header: dec.Data.Header.Beacon,
+ Signature: dec.Data.SyncAggregate,
+ SignatureSlot: uint64(dec.Data.SignatureSlot),
+ },
+ NextSyncCommitteeRoot: u.NextSyncCommittee.Root(),
+ NextSyncCommitteeBranch: dec.Data.NextSyncCommitteeBranch,
+ FinalityBranch: dec.Data.FinalityBranch,
+ }
+ if dec.Data.FinalizedHeader != nil {
+ u.Update.FinalizedHeader = &dec.Data.FinalizedHeader.Beacon
+ }
+ return nil
+}
+
+// fetcher is an interface useful for debug-harnessing the http api.
+type fetcher interface {
+ Do(req *http.Request) (*http.Response, error)
+}
+
+// BeaconLightApi requests light client information from a beacon node REST API.
+// Note: all required API endpoints are currently only implemented by Lodestar.
+type BeaconLightApi struct {
+ url string
+ client fetcher
+ customHeaders map[string]string
+}
+
+func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLightApi {
+ return &BeaconLightApi{
+ url: url,
+ client: &http.Client{
+ Timeout: time.Second * 10,
+ },
+ customHeaders: customHeaders,
+ }
+}
+
+func (api *BeaconLightApi) httpGet(path string) ([]byte, error) {
+ req, err := http.NewRequest("GET", api.url+path, nil)
+ if err != nil {
+ return nil, err
+ }
+ for k, v := range api.customHeaders {
+ req.Header.Set(k, v)
+ }
+ resp, err := api.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ switch resp.StatusCode {
+ case 200:
+ return io.ReadAll(resp.Body)
+ case 404:
+ return nil, ErrNotFound
+ case 500:
+ return nil, ErrInternal
+ default:
+ return nil, fmt.Errorf("unexpected error from API endpoint \"%s\": status code %d", path, resp.StatusCode)
+ }
+}
+
+func (api *BeaconLightApi) httpGetf(format string, params ...any) ([]byte, error) {
+ return api.httpGet(fmt.Sprintf(format, params...))
+}
+
+// GetBestUpdatesAndCommittees fetches and validates a LightClientUpdate for each
+// period in the given range and the full serialized committee for the following
+// period of each update (committee root hash equals update.NextSyncCommitteeRoot).
+// Note that the results are validated but the update signature should be verified
+// by the caller as its validity depends on the update chain.
+func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
+ resp, err := api.httpGetf("/eth/v1/beacon/light_client/updates?start_period=%d&count=%d", firstPeriod, count)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var data []CommitteeUpdate
+ if err := json.Unmarshal(resp, &data); err != nil {
+ return nil, nil, err
+ }
+ if len(data) != int(count) {
+ return nil, nil, errors.New("invalid number of committee updates")
+ }
+ updates := make([]*types.LightClientUpdate, int(count))
+ committees := make([]*types.SerializedSyncCommittee, int(count))
+ for i, d := range data {
+ if d.Update.AttestedHeader.Header.SyncPeriod() != firstPeriod+uint64(i) {
+ return nil, nil, errors.New("wrong committee update header period")
+ }
+ if err := d.Update.Validate(); err != nil {
+ return nil, nil, err
+ }
+ if d.NextSyncCommittee.Root() != d.Update.NextSyncCommitteeRoot {
+ return nil, nil, errors.New("wrong sync committee root")
+ }
+ updates[i], committees[i] = new(types.LightClientUpdate), new(types.SerializedSyncCommittee)
+ *updates[i], *committees[i] = d.Update, d.NextSyncCommittee
+ }
+ return updates, committees, nil
+}
+
+// GetOptimisticHeadUpdate fetches a signed header based on the latest available
+// optimistic update. Note that the signature should be verified by the caller
+// as its validity depends on the update chain.
+//
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
+func (api *BeaconLightApi) GetOptimisticHeadUpdate() (types.SignedHeader, error) {
+ resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
+ if err != nil {
+ return types.SignedHeader{}, err
+ }
+ return decodeOptimisticHeadUpdate(resp)
+}
+
+func decodeOptimisticHeadUpdate(enc []byte) (types.SignedHeader, error) {
+ var data struct {
+ Data struct {
+ Header jsonBeaconHeader `json:"attested_header"`
+ Aggregate types.SyncAggregate `json:"sync_aggregate"`
+ SignatureSlot common.Decimal `json:"signature_slot"`
+ } `json:"data"`
+ }
+ if err := json.Unmarshal(enc, &data); err != nil {
+ return types.SignedHeader{}, err
+ }
+ if data.Data.Header.Beacon.StateRoot == (common.Hash{}) {
+ // workaround for different event encoding format in Lodestar
+ if err := json.Unmarshal(enc, &data.Data); err != nil {
+ return types.SignedHeader{}, err
+ }
+ }
+
+ if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
+ return types.SignedHeader{}, errors.New("invalid sync_committee_bits length")
+ }
+ if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
+ return types.SignedHeader{}, errors.New("invalid sync_committee_signature length")
+ }
+ return types.SignedHeader{
+ Header: data.Data.Header.Beacon,
+ Signature: data.Data.Aggregate,
+ SignatureSlot: uint64(data.Data.SignatureSlot),
+ }, nil
+}
+
+// GetFinalityUpdate fetches the latest available finality update.
+//
+// See data structure definition here:
+// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
+func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
+ resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update")
+ if err != nil {
+ return types.FinalityUpdate{}, err
+ }
+ return decodeFinalityUpdate(resp)
+}
+
+func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
+ var data struct {
+ Data struct {
+ Attested jsonHeaderWithExecProof `json:"attested_header"`
+ Finalized jsonHeaderWithExecProof `json:"finalized_header"`
+ FinalityBranch merkle.Values `json:"finality_branch"`
+ Aggregate types.SyncAggregate `json:"sync_aggregate"`
+ SignatureSlot common.Decimal `json:"signature_slot"`
+ } `json:"data"`
+ }
+ if err := json.Unmarshal(enc, &data); err != nil {
+ return types.FinalityUpdate{}, err
+ }
+
+ if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
+ return types.FinalityUpdate{}, errors.New("invalid sync_committee_bits length")
+ }
+ if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
+ return types.FinalityUpdate{}, errors.New("invalid sync_committee_signature length")
+ }
+ return types.FinalityUpdate{
+ Attested: types.HeaderWithExecProof{
+ Header: data.Data.Attested.Beacon,
+ PayloadHeader: data.Data.Attested.Execution,
+ PayloadBranch: data.Data.Attested.ExecutionBranch,
+ },
+ Finalized: types.HeaderWithExecProof{
+ Header: data.Data.Finalized.Beacon,
+ PayloadHeader: data.Data.Finalized.Execution,
+ PayloadBranch: data.Data.Finalized.ExecutionBranch,
+ },
+ FinalityBranch: data.Data.FinalityBranch,
+ Signature: data.Data.Aggregate,
+ SignatureSlot: uint64(data.Data.SignatureSlot),
+ }, nil
+}
+
+// GetHeader fetches and validates the beacon header with the given blockRoot.
+// If blockRoot is the zero hash then the latest head header is fetched.
+func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) {
+ var blockId string
+ if blockRoot == (common.Hash{}) {
+ blockId = "head"
+ } else {
+ blockId = blockRoot.Hex()
+ }
+ resp, err := api.httpGetf("/eth/v1/beacon/headers/%s", blockId)
+ if err != nil {
+ return types.Header{}, err
+ }
+
+ var data struct {
+ Data struct {
+ Root common.Hash `json:"root"`
+ Canonical bool `json:"canonical"`
+ Header struct {
+ Message types.Header `json:"message"`
+ Signature hexutil.Bytes `json:"signature"`
+ } `json:"header"`
+ } `json:"data"`
+ }
+ if err := json.Unmarshal(resp, &data); err != nil {
+ return types.Header{}, err
+ }
+ header := data.Data.Header.Message
+ if blockRoot == (common.Hash{}) {
+ blockRoot = data.Data.Root
+ }
+ if header.Hash() != blockRoot {
+ return types.Header{}, errors.New("retrieved beacon header root does not match")
+ }
+ return header, nil
+}
+
+// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
+func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
+ resp, err := api.httpGetf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:])
+ if err != nil {
+ return nil, err
+ }
+
+ // See data structure definition here:
+ // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
+ type bootstrapData struct {
+ Data struct {
+ Header jsonBeaconHeader `json:"header"`
+ Committee *types.SerializedSyncCommittee `json:"current_sync_committee"`
+ CommitteeBranch merkle.Values `json:"current_sync_committee_branch"`
+ } `json:"data"`
+ }
+
+ var data bootstrapData
+ if err := json.Unmarshal(resp, &data); err != nil {
+ return nil, err
+ }
+ if data.Data.Committee == nil {
+ return nil, errors.New("sync committee is missing")
+ }
+ header := data.Data.Header.Beacon
+ if header.Hash() != checkpointHash {
+ return nil, fmt.Errorf("invalid checkpoint block header, have %v want %v", header.Hash(), checkpointHash)
+ }
+ checkpoint := &types.BootstrapData{
+ Header: header,
+ CommitteeBranch: data.Data.CommitteeBranch,
+ CommitteeRoot: data.Data.Committee.Root(),
+ Committee: data.Data.Committee,
+ }
+ if err := checkpoint.Validate(); err != nil {
+ return nil, fmt.Errorf("invalid checkpoint: %w", err)
+ }
+ if checkpoint.Header.Hash() != checkpointHash {
+ return nil, errors.New("wrong checkpoint hash")
+ }
+ return checkpoint, nil
+}
+
+func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*capella.BeaconBlock, error) {
+ resp, err := api.httpGetf("/eth/v2/beacon/blocks/0x%x", blockRoot)
+ if err != nil {
+ return nil, err
+ }
+
+ var beaconBlockMessage struct {
+ Data struct {
+ Message capella.BeaconBlock `json:"message"`
+ } `json:"data"`
+ }
+ if err := json.Unmarshal(resp, &beaconBlockMessage); err != nil {
+ return nil, fmt.Errorf("invalid block json data: %v", err)
+ }
+ beaconBlock := new(capella.BeaconBlock)
+ *beaconBlock = beaconBlockMessage.Data.Message
+ root := common.Hash(beaconBlock.HashTreeRoot(configs.Mainnet, tree.GetHashFn()))
+ if root != blockRoot {
+ return nil, fmt.Errorf("Beacon block root hash mismatch (expected: %x, got: %x)", blockRoot, root)
+ }
+ return beaconBlock, nil
+}
+
+func decodeHeadEvent(enc []byte) (uint64, common.Hash, error) {
+ var data struct {
+ Slot common.Decimal `json:"slot"`
+ Block common.Hash `json:"block"`
+ }
+ if err := json.Unmarshal(enc, &data); err != nil {
+ return 0, common.Hash{}, err
+ }
+ return uint64(data.Slot), data.Block, nil
+}
+
+type HeadEventListener struct {
+ OnNewHead func(slot uint64, blockRoot common.Hash)
+ OnSignedHead func(head types.SignedHeader)
+ OnFinality func(head types.FinalityUpdate)
+ OnError func(err error)
+}
+
+// StartHeadListener creates an event subscription for heads and signed (optimistic)
+// head updates and calls the specified callback functions when they are received.
+// The callbacks are also called for the current head and optimistic head at startup.
+// They are never called concurrently.
+func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func() {
+ closeCh := make(chan struct{}) // initiate closing the stream
+ closedCh := make(chan struct{}) // stream closed (or failed to create)
+ stoppedCh := make(chan struct{}) // sync loop stopped
+ streamCh := make(chan *eventsource.Stream, 1)
+ go func() {
+ defer close(closedCh)
+ // when connected to a Lodestar node the subscription blocks until the
+ // first actual event arrives; therefore we create the subscription in
+ // a separate goroutine while letting the main goroutine sync up to the
+ // current head
+ req, err := http.NewRequest("GET", api.url+
+ "/eth/v1/events?topics=head&topics=light_client_optimistic_update&topics=light_client_finality_update", nil)
+ if err != nil {
+ listener.OnError(fmt.Errorf("error creating event subscription request: %v", err))
+ return
+ }
+ for k, v := range api.customHeaders {
+ req.Header.Set(k, v)
+ }
+ stream, err := eventsource.SubscribeWithRequest("", req)
+ if err != nil {
+ listener.OnError(fmt.Errorf("error creating event subscription: %v", err))
+ close(streamCh)
+ return
+ }
+ streamCh <- stream
+ <-closeCh
+ stream.Close()
+ }()
+
+ go func() {
+ defer close(stoppedCh)
+
+ if head, err := api.GetHeader(common.Hash{}); err == nil {
+ listener.OnNewHead(head.Slot, head.Hash())
+ }
+ if signedHead, err := api.GetOptimisticHeadUpdate(); err == nil {
+ listener.OnSignedHead(signedHead)
+ }
+ if finalityUpdate, err := api.GetFinalityUpdate(); err == nil {
+ listener.OnFinality(finalityUpdate)
+ }
+ stream := <-streamCh
+ if stream == nil {
+ return
+ }
+
+ for {
+ select {
+ case event, ok := <-stream.Events:
+ if !ok {
+ return // event stream closed; stop the head event loop
+ }
+ switch event.Event() {
+ case "head":
+ if slot, blockRoot, err := decodeHeadEvent([]byte(event.Data())); err == nil {
+ listener.OnNewHead(slot, blockRoot)
+ } else {
+ listener.OnError(fmt.Errorf("error decoding head event: %v", err))
+ }
+ case "light_client_optimistic_update":
+ if signedHead, err := decodeOptimisticHeadUpdate([]byte(event.Data())); err == nil {
+ listener.OnSignedHead(signedHead)
+ } else {
+ listener.OnError(fmt.Errorf("error decoding optimistic update event: %v", err))
+ }
+ case "light_client_finality_update":
+ if finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data())); err == nil {
+ listener.OnFinality(finalityUpdate)
+ } else {
+ listener.OnError(fmt.Errorf("error decoding finality update event: %v", err))
+ }
+ default:
+ listener.OnError(fmt.Errorf("unexpected event: %s", event.Event()))
+ }
+ case err, ok := <-stream.Errors:
+ if !ok {
+ return // error stream closed; stop the head event loop
+ }
+ listener.OnError(err)
+ }
+ }
+ }()
+ return func() {
+ close(closeCh)
+ <-closedCh
+ <-stoppedCh
+ }
+}
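A hedged usage sketch for BeaconLightApi on its own, outside the scheduler; the endpoint URL is a placeholder (a Lodestar node is assumed, since it serves all required light client endpoints) and error handling is minimal.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/ethereum/go-ethereum/beacon/light/api"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	client := api.NewBeaconLightApi("http://127.0.0.1:9596", nil)

	// Fetch the current head header (the zero hash selects the latest head).
	head, err := client.GetHeader(common.Hash{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("head slot:", head.Slot, "root:", head.Hash())

	// Stream head, optimistic and finality events for a little while.
	unsubscribe := client.StartHeadListener(api.HeadEventListener{
		OnNewHead:    func(slot uint64, root common.Hash) { fmt.Println("head:", slot, root) },
		OnSignedHead: func(h types.SignedHeader) { fmt.Println("signed head:", h.Header.Slot) },
		OnFinality:   func(u types.FinalityUpdate) { fmt.Println("finalized:", u.Finalized.Header.Slot) },
		OnError:      func(err error) { fmt.Println("stream error:", err) },
	})
	time.Sleep(30 * time.Second)
	unsubscribe()
}
```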
diff --git a/beacon/light/committee_chain.go b/beacon/light/committee_chain.go
index d707f8cc34..a8d032bb65 100644
--- a/beacon/light/committee_chain.go
+++ b/beacon/light/committee_chain.go
@@ -70,6 +70,7 @@ type CommitteeChain struct {
committees *canonicalStore[*types.SerializedSyncCommittee]
fixedCommitteeRoots *canonicalStore[common.Hash]
committeeCache *lru.Cache[uint64, syncCommittee] // cache deserialized committees
+ changeCounter uint64
clock mclock.Clock // monotonic clock (simulated clock in tests)
unixNano func() int64 // system clock (simulated clock in tests)
@@ -86,6 +87,11 @@ func NewCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signer
return newCommitteeChain(db, config, signerThreshold, enforceTime, blsVerifier{}, &mclock.System{}, func() int64 { return time.Now().UnixNano() })
}
+// NewTestCommitteeChain creates a new CommitteeChain for testing.
+func NewTestCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, clock *mclock.Simulated) *CommitteeChain {
+ return newCommitteeChain(db, config, signerThreshold, enforceTime, dummyVerifier{}, clock, func() int64 { return int64(clock.Now()) })
+}
+
// newCommitteeChain creates a new CommitteeChain with the option of replacing the
// clock source and signature verification for testing purposes.
func newCommitteeChain(db ethdb.KeyValueStore, config *types.ChainConfig, signerThreshold int, enforceTime bool, sigVerifier committeeSigVerifier, clock mclock.Clock, unixNano func() int64) *CommitteeChain {
@@ -181,20 +187,20 @@ func (s *CommitteeChain) Reset() {
if err := s.rollback(0); err != nil {
log.Error("Error writing batch into chain database", "error", err)
}
+ s.changeCounter++
}
-// CheckpointInit initializes a CommitteeChain based on the checkpoint.
+// CheckpointInit initializes a CommitteeChain based on a checkpoint.
// Note: if the chain is already initialized and the committees proven by the
// checkpoint do match the existing chain then the chain is retained and the
// new checkpoint becomes fixed.
-func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
+func (s *CommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
s.chainmu.Lock()
defer s.chainmu.Unlock()
if err := bootstrap.Validate(); err != nil {
return err
}
-
period := bootstrap.Header.SyncPeriod()
if err := s.deleteFixedCommitteeRootsFrom(period + 2); err != nil {
s.Reset()
@@ -215,6 +221,7 @@ func (s *CommitteeChain) CheckpointInit(bootstrap *types.BootstrapData) error {
s.Reset()
return err
}
+ s.changeCounter++
return nil
}
@@ -367,6 +374,7 @@ func (s *CommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommi
return ErrWrongCommitteeRoot
}
}
+ s.changeCounter++
if reorg {
if err := s.rollback(period + 1); err != nil {
return err
@@ -405,6 +413,13 @@ func (s *CommitteeChain) NextSyncPeriod() (uint64, bool) {
return s.committees.periods.End - 1, true
}
+func (s *CommitteeChain) ChangeCounter() uint64 {
+ s.chainmu.RLock()
+ defer s.chainmu.RUnlock()
+
+ return s.changeCounter
+}
+
// rollback removes all committees and fixed roots from the given period and updates
// starting from the previous period.
func (s *CommitteeChain) rollback(period uint64) error {
@@ -452,12 +467,12 @@ func (s *CommitteeChain) getSyncCommittee(period uint64) (syncCommittee, error)
if sc, ok := s.committees.get(s.db, period); ok {
c, err := s.sigVerifier.deserializeSyncCommittee(sc)
if err != nil {
- return nil, fmt.Errorf("Sync committee #%d deserialization error: %v", period, err)
+ return nil, fmt.Errorf("sync committee #%d deserialization error: %v", period, err)
}
s.committeeCache.Add(period, c)
return c, nil
}
- return nil, fmt.Errorf("Missing serialized sync committee #%d", period)
+ return nil, fmt.Errorf("missing serialized sync committee #%d", period)
}
// VerifySignedHeader returns true if the given signed header has a valid signature
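To illustrate the two additions in this file (NewTestCommitteeChain and ChangeCounter), a small test-style sketch follows; the config, threshold and external test package placement are illustrative assumptions.

```go
package light_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/beacon/light"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func TestChangeCounterSketch(t *testing.T) {
	// Placeholder chain config and signer threshold; NewTestCommitteeChain swaps
	// in the dummy signature verifier and a simulated clock.
	config := (&types.ChainConfig{}).AddFork("GENESIS", 0, []byte{0, 0, 0, 0})
	chain := light.NewTestCommitteeChain(memorydb.New(), config, 300, true, &mclock.Simulated{})

	before := chain.ChangeCounter()
	chain.Reset() // any mutation of the chain bumps the counter
	if chain.ChangeCounter() == before {
		t.Fatal("expected ChangeCounter to increase after Reset")
	}
}
```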
diff --git a/beacon/light/committee_chain_test.go b/beacon/light/committee_chain_test.go
index 60ea2a0efd..57b6d7175c 100644
--- a/beacon/light/committee_chain_test.go
+++ b/beacon/light/committee_chain_test.go
@@ -241,12 +241,12 @@ func newCommitteeChainTest(t *testing.T, config types.ChainConfig, signerThresho
signerThreshold: signerThreshold,
enforceTime: enforceTime,
}
- c.chain = newCommitteeChain(c.db, &config, signerThreshold, enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
+ c.chain = NewTestCommitteeChain(c.db, &config, signerThreshold, enforceTime, c.clock)
return c
}
func (c *committeeChainTest) reloadChain() {
- c.chain = newCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, dummyVerifier{}, c.clock, func() int64 { return int64(c.clock.Now()) })
+ c.chain = NewTestCommitteeChain(c.db, &c.config, c.signerThreshold, c.enforceTime, c.clock)
}
func (c *committeeChainTest) setClockPeriod(period float64) {
diff --git a/beacon/light/head_tracker.go b/beacon/light/head_tracker.go
new file mode 100644
index 0000000000..579e1b53da
--- /dev/null
+++ b/beacon/light/head_tracker.go
@@ -0,0 +1,150 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package light
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// HeadTracker keeps track of the latest validated head and the "prefetch" head
+// which is the (not necessarily validated) head announced by the majority of
+// servers.
+type HeadTracker struct {
+ lock sync.RWMutex
+ committeeChain *CommitteeChain
+ minSignerCount int
+ signedHead types.SignedHeader
+ hasSignedHead bool
+ finalityUpdate types.FinalityUpdate
+ hasFinalityUpdate bool
+ prefetchHead types.HeadInfo
+ changeCounter uint64
+}
+
+// NewHeadTracker creates a new HeadTracker.
+func NewHeadTracker(committeeChain *CommitteeChain, minSignerCount int) *HeadTracker {
+ return &HeadTracker{
+ committeeChain: committeeChain,
+ minSignerCount: minSignerCount,
+ }
+}
+
+// ValidatedHead returns the latest validated head.
+func (h *HeadTracker) ValidatedHead() (types.SignedHeader, bool) {
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+
+ return h.signedHead, h.hasSignedHead
+}
+
+// ValidatedFinality returns the latest validated finality update.
+func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+
+ return h.finalityUpdate, h.hasFinalityUpdate
+}
+
+// ValidateHead validates the given signed head. If the head is successfully validated
+// and it is better than the old validated head (higher slot or same slot and more
+// signers) then ValidatedHead is updated. The boolean return flag signals if
+// ValidatedHead has been changed.
+func (h *HeadTracker) ValidateHead(head types.SignedHeader) (bool, error) {
+ h.lock.Lock()
+ defer h.lock.Unlock()
+
+ replace, err := h.validate(head, h.signedHead)
+ if replace {
+ h.signedHead, h.hasSignedHead = head, true
+ h.changeCounter++
+ }
+ return replace, err
+}
+
+func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
+ h.lock.Lock()
+ defer h.lock.Unlock()
+
+ replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
+ if replace {
+ h.finalityUpdate, h.hasFinalityUpdate = update, true
+ h.changeCounter++
+ }
+ return replace, err
+}
+
+func (h *HeadTracker) validate(head, oldHead types.SignedHeader) (bool, error) {
+ signerCount := head.Signature.SignerCount()
+ if signerCount < h.minSignerCount {
+ return false, errors.New("low signer count")
+ }
+ if head.Header.Slot < oldHead.Header.Slot || (head.Header.Slot == oldHead.Header.Slot && signerCount <= oldHead.Signature.SignerCount()) {
+ return false, nil
+ }
+ sigOk, age, err := h.committeeChain.VerifySignedHeader(head)
+ if err != nil {
+ return false, err
+ }
+ if age < 0 {
+ log.Warn("Future signed head received", "age", age)
+ }
+ if age > time.Minute*2 {
+ log.Warn("Old signed head received", "age", age)
+ }
+ if !sigOk {
+ return false, errors.New("invalid header signature")
+ }
+ return true, nil
+}
+
+// PrefetchHead returns the latest known prefetch head's head info.
+// This head can be used to start fetching related data hoping that it will be
+// validated soon.
+// Note that the prefetch head cannot be validated cryptographically so it should
+// only be used as a performance optimization hint.
+func (h *HeadTracker) PrefetchHead() types.HeadInfo {
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+
+ return h.prefetchHead
+}
+
+// SetPrefetchHead sets the prefetch head info.
+// Note that HeadTracker does not verify the prefetch head; it just acts as a
+// thread-safe bulletin board.
+func (h *HeadTracker) SetPrefetchHead(head types.HeadInfo) {
+ h.lock.Lock()
+ defer h.lock.Unlock()
+
+ if head == h.prefetchHead {
+ return
+ }
+ h.prefetchHead = head
+ h.changeCounter++
+}
+
+func (h *HeadTracker) ChangeCounter() uint64 {
+ h.lock.RLock()
+ defer h.lock.RUnlock()
+
+ return h.changeCounter
+}
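A small sketch of HeadTracker used as the thread-safe bulletin board described above; the chain setup mirrors the earlier committee-chain sketch and all values below are placeholders.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/beacon/light"
	"github.com/ethereum/go-ethereum/beacon/types"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	// Placeholder committee chain and signer threshold.
	config := (&types.ChainConfig{}).AddFork("GENESIS", 0, []byte{0, 0, 0, 0})
	chain := light.NewTestCommitteeChain(memorydb.New(), config, 300, true, &mclock.Simulated{})
	tracker := light.NewHeadTracker(chain, 300)

	// The prefetch head is an unvalidated hint about what to fetch next.
	tracker.SetPrefetchHead(types.HeadInfo{Slot: 123, BlockRoot: common.HexToHash("0x01")})
	fmt.Println("prefetch head slot:", tracker.PrefetchHead().Slot)

	// No signed header has been validated yet, so there is no validated head.
	if _, ok := tracker.ValidatedHead(); !ok {
		fmt.Println("no validated head yet")
	}
}
```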
diff --git a/beacon/light/request/scheduler.go b/beacon/light/request/scheduler.go
new file mode 100644
index 0000000000..20f811900e
--- /dev/null
+++ b/beacon/light/request/scheduler.go
@@ -0,0 +1,401 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package request
+
+import (
+ "sync"
+
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Module represents a mechanism which is typically responsible for downloading
+// and updating a passive data structure. It does not directly interact with the
+// servers. It can start requests using the Requester interface, maintain its
+// internal state by receiving and processing Events and update its target data
+// structure based on the obtained data.
+// It is the Scheduler's responsibility to feed events to the modules and to call
+// Process as long as there might be something to process; modules then start the
+// requests they need through the Requester passed to Process.
+// Modules are called by Scheduler whenever a global trigger is fired. All events
+// fire the trigger. Changing a target data structure also triggers a next
+// processing round as it could make further actions possible either by the same
+// or another Module.
+type Module interface {
+ // Process is a non-blocking function responsible for starting requests,
+ // processing events and updating the target data structures(s) and the
+ // internal state of the module. Module state typically consists of information
+ // about pending requests and registered servers.
+ // Process is always called after an event is received or after a target data
+ // structure has been changed.
+ //
+ // Note: Process functions of different modules are never called concurrently;
+ // they are called by Scheduler in the same order of priority as they were
+ // registered in.
+ Process(Requester, []Event)
+}
+
+// Requester allows Modules to obtain the list of momentarily available servers,
+// start new requests and report server failure when a response has been proven
+// to be invalid in the processing phase.
+// Note that all Requester functions should be safe to call from Module.Process.
+type Requester interface {
+ CanSendTo() []Server
+ Send(Server, Request) ID
+ Fail(Server, string)
+}
+
+// Scheduler is a modular network data retrieval framework that coordinates multiple
+// servers and retrieval mechanisms (modules). It implements a trigger mechanism
+// that calls the Process function of registered modules whenever either the state
+// of existing data structures or events coming from registered servers could
+// allow new operations.
+type Scheduler struct {
+ lock sync.Mutex
+ modules []Module // first has highest priority
+ names map[Module]string
+ servers map[server]struct{}
+ targets map[targetData]uint64
+
+ requesterLock sync.RWMutex
+ serverOrder []server
+ pending map[ServerAndID]pendingRequest
+
+ // eventLock guards access to the events list. Note that eventLock can be
+ // locked either while lock is locked or unlocked but lock cannot be locked
+ // while eventLock is locked.
+ eventLock sync.Mutex
+ events []Event
+ stopCh chan chan struct{}
+
+ triggerCh chan struct{} // restarts waiting sync loop
+ // if trigger has already been fired then send to testWaitCh blocks until
+ // the triggered processing round is finished
+ testWaitCh chan struct{}
+}
+
+type (
+ // Server identifies a server without allowing any direct interaction.
+ // Note: the lower-case server interface is only used internally by the
+ // Scheduler; modules and events refer to servers through this opaque Server
+ // type, which also makes module testing easier.
+ Server any
+ Request any
+ Response any
+ ID uint64
+ ServerAndID struct {
+ Server Server
+ ID ID
+ }
+)
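+
+// Request and Response are deliberately untyped here; each module defines its own
+// concrete request and response types and type-asserts them back when processing
+// request events (the beacon/light/sync package in this change declares several,
+// e.g. ReqUpdates/RespUpdates and ReqCheckpointData).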
+
+// targetData represents a registered target data structure that increases its
+// ChangeCounter whenever it has been changed.
+type targetData interface {
+ ChangeCounter() uint64
+}
+
+// pendingRequest keeps track of sent and not yet finalized requests and their
+// sender modules.
+type pendingRequest struct {
+ request Request
+ module Module
+}
+
+// NewScheduler creates a new Scheduler.
+func NewScheduler() *Scheduler {
+ s := &Scheduler{
+ servers: make(map[server]struct{}),
+ names: make(map[Module]string),
+ pending: make(map[ServerAndID]pendingRequest),
+ targets: make(map[targetData]uint64),
+ stopCh: make(chan chan struct{}),
+ // Note: testWaitCh should not have capacity in order to ensure
+ // that after a trigger happens testWaitCh will block until the resulting
+ // processing round has been finished
+ triggerCh: make(chan struct{}, 1),
+ testWaitCh: make(chan struct{}),
+ }
+ return s
+}
+
+// RegisterTarget registers a target data structure, ensuring that any changes
+// made to it trigger a new round of Module.Process calls, giving a chance to
+// modules to react to the changes.
+func (s *Scheduler) RegisterTarget(t targetData) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.targets[t] = 0
+}
+
+// RegisterModule registers a module. Should be called before starting the scheduler.
+// In each processing round the order of module processing depends on the order of
+// registration.
+func (s *Scheduler) RegisterModule(m Module, name string) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.modules = append(s.modules, m)
+ s.names[m] = name
+}
+
+// RegisterServer registers a new server.
+func (s *Scheduler) RegisterServer(server server) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.addEvent(Event{Type: EvRegistered, Server: server})
+ server.subscribe(func(event Event) {
+ event.Server = server
+ s.addEvent(event)
+ })
+}
+
+// UnregisterServer removes a registered server.
+func (s *Scheduler) UnregisterServer(server server) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ server.unsubscribe()
+ s.addEvent(Event{Type: EvUnregistered, Server: server})
+}
+
+// Start starts the scheduler. It should be called after registering all modules
+// and before registering any servers.
+func (s *Scheduler) Start() {
+ go s.syncLoop()
+}
+
+// Stop stops the scheduler.
+func (s *Scheduler) Stop() {
+ stop := make(chan struct{})
+ s.stopCh <- stop
+ <-stop
+ s.lock.Lock()
+ for server := range s.servers {
+ server.unsubscribe()
+ }
+ s.servers = nil
+ s.lock.Unlock()
+}
+
+// syncLoop is the main event loop responsible for event/data processing and
+// sending new requests.
+// A round of processing starts whenever the global trigger is fired. Triggers
+// fired during a processing round ensure that there is going to be a next round.
+func (s *Scheduler) syncLoop() {
+ for {
+ s.lock.Lock()
+ s.processRound()
+ s.lock.Unlock()
+ loop:
+ for {
+ select {
+ case stop := <-s.stopCh:
+ close(stop)
+ return
+ case <-s.triggerCh:
+ break loop
+ case <-s.testWaitCh:
+ }
+ }
+ }
+}
+
+// targetChanged returns true if a registered target data structure has been
+// changed since the last call to this function.
+func (s *Scheduler) targetChanged() (changed bool) {
+ for target, counter := range s.targets {
+ if newCounter := target.ChangeCounter(); newCounter != counter {
+ s.targets[target] = newCounter
+ changed = true
+ }
+ }
+ return
+}
+
+// processRound runs an entire processing round. It calls the Process functions
+// of all modules, passing all relevant events and repeating Process calls as
+// long as any changes have been made to the registered target data structures.
+// Once all events have been processed and a stable state has been achieved,
+// requests are generated and sent if necessary and possible.
+func (s *Scheduler) processRound() {
+ for {
+ log.Trace("Processing modules")
+ filteredEvents := s.filterEvents()
+ for _, module := range s.modules {
+ log.Trace("Processing module", "name", s.names[module], "events", len(filteredEvents[module]))
+ module.Process(requester{s, module}, filteredEvents[module])
+ }
+ if !s.targetChanged() {
+ break
+ }
+ }
+}
+
+// Trigger starts a new processing round. If fired during processing, it ensures
+// another full round of processing all modules.
+func (s *Scheduler) Trigger() {
+ select {
+ case s.triggerCh <- struct{}{}:
+ default:
+ }
+}
+
+// addEvent adds an event to be processed in the next round. Note that it can be
+// called regardless of the state of the lock mutex, making it safe for use in
+// the server event callback.
+func (s *Scheduler) addEvent(event Event) {
+ s.eventLock.Lock()
+ s.events = append(s.events, event)
+ s.eventLock.Unlock()
+ s.Trigger()
+}
+
+// filterEvents sorts each Event either as a request event or a server event,
+// depending on its type. Request events are also sorted in a map based on the
+// module that originally initiated the request. It also ensures that no events
+// related to a server are returned before EvRegistered or after EvUnregistered.
+// In case of an EvUnregistered server event it also closes all pending requests
+// to the given server by adding a failed request event (EvFail), ensuring that
+// all requests get finalized and thereby allowing the module logic to be safe
+// and simple.
+func (s *Scheduler) filterEvents() map[Module][]Event {
+ s.eventLock.Lock()
+ events := s.events
+ s.events = nil
+ s.eventLock.Unlock()
+
+ s.requesterLock.Lock()
+ defer s.requesterLock.Unlock()
+
+ filteredEvents := make(map[Module][]Event)
+ for _, event := range events {
+ server := event.Server.(server)
+ if _, ok := s.servers[server]; !ok && event.Type != EvRegistered {
+ continue // before EvRegistered or after EvUnregistered, discard
+ }
+
+ if event.IsRequestEvent() {
+ sid, _, _ := event.RequestInfo()
+ pending, ok := s.pending[sid]
+ if !ok {
+ continue // request already closed, ignore further events
+ }
+ if event.Type == EvResponse || event.Type == EvFail {
+ delete(s.pending, sid) // final event, close pending request
+ }
+ filteredEvents[pending.module] = append(filteredEvents[pending.module], event)
+ } else {
+ switch event.Type {
+ case EvRegistered:
+ s.servers[server] = struct{}{}
+ s.serverOrder = append(s.serverOrder, nil)
+ copy(s.serverOrder[1:], s.serverOrder[:len(s.serverOrder)-1])
+ s.serverOrder[0] = server
+ case EvUnregistered:
+ s.closePending(event.Server, filteredEvents)
+ delete(s.servers, server)
+ for i, srv := range s.serverOrder {
+ if srv == server {
+ copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
+ s.serverOrder = s.serverOrder[:len(s.serverOrder)-1]
+ break
+ }
+ }
+ }
+ for _, module := range s.modules {
+ filteredEvents[module] = append(filteredEvents[module], event)
+ }
+ }
+ }
+ return filteredEvents
+}
+
+// closePending closes all pending requests to the given server and adds an EvFail
+// event to properly finalize them
+func (s *Scheduler) closePending(server Server, filteredEvents map[Module][]Event) {
+ for sid, pending := range s.pending {
+ if sid.Server == server {
+ filteredEvents[pending.module] = append(filteredEvents[pending.module], Event{
+ Type: EvFail,
+ Server: server,
+ Data: RequestResponse{
+ ID: sid.ID,
+ Request: pending.request,
+ },
+ })
+ delete(s.pending, sid)
+ }
+ }
+}
+
+// requester implements Requester. Note that while requester basically wraps
+// Scheduler (with the added information of the currently processed Module), all
+// functions are safe to call from Module.Process which is running while
+// the Scheduler.lock mutex is held.
+type requester struct {
+ *Scheduler
+ module Module
+}
+
+// CanSendTo returns the list of currently available servers, ordered from least
+// to most recently used. This yields a round-robin usage of suitable servers if
+// the module always picks the first suitable one.
+func (s requester) CanSendTo() []Server {
+ s.requesterLock.RLock()
+ defer s.requesterLock.RUnlock()
+
+ list := make([]Server, 0, len(s.serverOrder))
+ for _, server := range s.serverOrder {
+ if server.canRequestNow() {
+ list = append(list, server)
+ }
+ }
+ return list
+}
+
+// Send sends a request and adds an entry to Scheduler.pending map, ensuring that
+// related request events will be delivered to the sender Module.
+func (s requester) Send(srv Server, req Request) ID {
+ s.requesterLock.Lock()
+ defer s.requesterLock.Unlock()
+
+ server := srv.(server)
+ id := server.sendRequest(req)
+ sid := ServerAndID{Server: srv, ID: id}
+ s.pending[sid] = pendingRequest{request: req, module: s.module}
+ for i, ss := range s.serverOrder {
+ if ss == server {
+ copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
+ s.serverOrder[len(s.serverOrder)-1] = server
+ return id
+ }
+ }
+ log.Error("Target server not found in ordered list of registered servers")
+ return id
+}
+
+// Fail should be called when a server delivers invalid or useless information.
+// Calling Fail disables the given server for a period that is initially short
+// but is exponentially growing if it happens frequently. This results in a
+// somewhat fault tolerant operation that avoids hammering servers with requests
+// that they cannot serve but still gives them a chance periodically.
+func (s requester) Fail(srv Server, desc string) {
+ srv.(server).fail(desc)
+}
diff --git a/beacon/light/request/scheduler_test.go b/beacon/light/request/scheduler_test.go
new file mode 100644
index 0000000000..7d5a567078
--- /dev/null
+++ b/beacon/light/request/scheduler_test.go
@@ -0,0 +1,122 @@
+package request
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestEventFilter(t *testing.T) {
+ s := NewScheduler()
+ module1 := &testModule{name: "module1"}
+ module2 := &testModule{name: "module2"}
+ s.RegisterModule(module1, "module1")
+ s.RegisterModule(module2, "module2")
+ s.Start()
+ // startup process round without events
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, nil)
+ module2.expProcess(t, nil)
+ srv := &testServer{}
+ // register server; both modules should receive server event
+ s.RegisterServer(srv)
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, []Event{
+ {Type: EvRegistered, Server: srv},
+ })
+ module2.expProcess(t, []Event{
+ {Type: EvRegistered, Server: srv},
+ })
+ // let module1 send a request
+ srv.canRequest = 1
+ module1.sendReq = testRequest
+ s.Trigger()
+ // in first triggered round module1 sends the request, no events yet
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, nil)
+ module2.expProcess(t, nil)
+ // server emits EvTimeout; only module1 should receive it
+ srv.eventCb(Event{Type: EvTimeout, Data: RequestResponse{ID: 1, Request: testRequest}})
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, []Event{
+ {Type: EvTimeout, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
+ })
+ module2.expProcess(t, nil)
+ // unregister server; both modules should receive server event
+ s.UnregisterServer(srv)
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, []Event{
+ // module1 should also receive EvFail on its pending request
+ {Type: EvFail, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
+ {Type: EvUnregistered, Server: srv},
+ })
+ module2.expProcess(t, []Event{
+ {Type: EvUnregistered, Server: srv},
+ })
+ // response after server unregistered; should be discarded
+ srv.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
+ s.testWaitCh <- struct{}{}
+ module1.expProcess(t, nil)
+ module2.expProcess(t, nil)
+ // no more process rounds expected; shut down
+ s.testWaitCh <- struct{}{}
+ module1.expNoMoreProcess(t)
+ module2.expNoMoreProcess(t)
+ s.Stop()
+}
+
+type testServer struct {
+ eventCb func(Event)
+ lastID ID
+ canRequest int
+}
+
+func (s *testServer) subscribe(eventCb func(Event)) {
+ s.eventCb = eventCb
+}
+
+func (s *testServer) canRequestNow() bool {
+ return s.canRequest > 0
+}
+
+func (s *testServer) sendRequest(req Request) ID {
+ s.canRequest--
+ s.lastID++
+ return s.lastID
+}
+
+func (s *testServer) fail(string) {}
+func (s *testServer) unsubscribe() {}
+
+type testModule struct {
+ name string
+ processed [][]Event
+ sendReq Request
+}
+
+func (m *testModule) Process(requester Requester, events []Event) {
+ m.processed = append(m.processed, events)
+ if m.sendReq != nil {
+ if cs := requester.CanSendTo(); len(cs) > 0 {
+ requester.Send(cs[0], m.sendReq)
+ }
+ }
+}
+
+func (m *testModule) expProcess(t *testing.T, expEvents []Event) {
+ if len(m.processed) == 0 {
+ t.Errorf("Missing call to %s.Process", m.name)
+ return
+ }
+ events := m.processed[0]
+ m.processed = m.processed[1:]
+ if !reflect.DeepEqual(events, expEvents) {
+ t.Errorf("Call to %s.Process with wrong events (expected %v, got %v)", m.name, expEvents, events)
+ }
+}
+
+func (m *testModule) expNoMoreProcess(t *testing.T) {
+ for len(m.processed) > 0 {
+ t.Errorf("Unexpected call to %s.Process with events %v", m.name, m.processed[0])
+ m.processed = m.processed[1:]
+ }
+}
diff --git a/beacon/light/request/server.go b/beacon/light/request/server.go
new file mode 100644
index 0000000000..999f64178a
--- /dev/null
+++ b/beacon/light/request/server.go
@@ -0,0 +1,439 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package request
+
+import (
+ "math"
+ "sync"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+var (
+ // request events
+ EvResponse = &EventType{Name: "response", requestEvent: true} // data: RequestResponse; sent by requestServer
+ EvFail = &EventType{Name: "fail", requestEvent: true} // data: RequestResponse; sent by requestServer
+ EvTimeout = &EventType{Name: "timeout", requestEvent: true} // data: RequestResponse; sent by serverWithTimeout
+ // server events
+ EvRegistered = &EventType{Name: "registered"} // data: nil; sent by Scheduler
+ EvUnregistered = &EventType{Name: "unregistered"} // data: nil; sent by Scheduler
+ EvCanRequestAgain = &EventType{Name: "canRequestAgain"} // data: nil; sent by serverWithLimits
+)
+
+const (
+ softRequestTimeout = time.Second // allow resending request to a different server but do not cancel yet
+ hardRequestTimeout = time.Second * 10 // cancel request
+)
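+
+// For illustration, with the values above: a request sent at t=0 and left
+// unanswered triggers EvTimeout at t=1s (softRequestTimeout), at which point a
+// module may retry on another server, and a final EvFail at t=10s
+// (hardRequestTimeout), after which any late answer from the parent is discarded.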
+
+const (
+ // serverWithLimits parameters
+ parallelAdjustUp = 0.1 // adjust parallelLimit up in case of success under full load
+ parallelAdjustDown = 1 // adjust parallelLimit down in case of timeout/failure
+ minParallelLimit = 1 // parallelLimit lower bound
+ defaultParallelLimit = 3 // parallelLimit initial value
+ minFailureDelay = time.Millisecond * 100 // minimum disable time in case of request failure
+ maxFailureDelay = time.Minute // maximum disable time in case of request failure
+ maxServerEventBuffer = 5 // server event allowance buffer limit
+ maxServerEventRate = time.Second // server event allowance buffer recharge rate
+)
+
+// requestServer can send requests in a non-blocking way and feed back events
+// through the event callback. After each request it should send back either
+// EvResponse or EvFail. Additionally, it may also send application-defined
+// events that the Modules can interpret.
+type requestServer interface {
+ Subscribe(eventCallback func(Event))
+ SendRequest(ID, Request)
+ Unsubscribe()
+}
+
+// server is implemented by a requestServer wrapped into serverWithTimeout and
+// serverWithLimits and is used by Scheduler.
+// In addition to requestServer functionality, server can also handle timeouts,
+// limit the number of parallel in-flight requests and temporarily disable
+// new requests based on timeouts and response failures.
+type server interface {
+ subscribe(eventCallback func(Event))
+ canRequestNow() bool
+ sendRequest(Request) ID
+ fail(string)
+ unsubscribe()
+}
+
+// NewServer wraps a requestServer and returns a server
+func NewServer(rs requestServer, clock mclock.Clock) server {
+ s := &serverWithLimits{}
+ s.parent = rs
+ s.serverWithTimeout.init(clock)
+ s.init()
+ return s
+}
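+
+// A minimal wiring sketch (illustrative only; myRequestServer, myModule and
+// myTarget are hypothetical implementations of requestServer, Module and
+// targetData):
+//
+//	scheduler := NewScheduler()
+//	scheduler.RegisterTarget(myTarget)
+//	scheduler.RegisterModule(myModule, "myModule")
+//	scheduler.Start()
+//	srv := NewServer(myRequestServer, &mclock.System{})
+//	scheduler.RegisterServer(srv)
+//	// ... on shutdown:
+//	scheduler.UnregisterServer(srv)
+//	scheduler.Stop()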
+
+// EventType identifies an event type, either related to a request or the server
+// in general. Server events can also be externally defined.
+type EventType struct {
+ Name string
+ requestEvent bool // all request events are pre-defined in request package
+}
+
+// Event describes an event where the type of Data depends on Type.
+// Server field is not required when sent through the event callback; it is filled
+// out when processed by the Scheduler. Note that the Scheduler can also create
+// and send events (EvRegistered, EvUnregistered) directly.
+type Event struct {
+ Type *EventType
+ Server Server // filled by Scheduler
+ Data any
+}
+
+// IsRequestEvent returns true if the event is a request event
+func (e *Event) IsRequestEvent() bool {
+ return e.Type.requestEvent
+}
+
+// RequestInfo assumes that the event is a request event and returns its contents
+// in a convenient form.
+func (e *Event) RequestInfo() (ServerAndID, Request, Response) {
+ data := e.Data.(RequestResponse)
+ return ServerAndID{Server: e.Server, ID: data.ID}, data.Request, data.Response
+}
+
+// RequestResponse is the Data type of request events.
+type RequestResponse struct {
+ ID ID
+ Request Request
+ Response Response
+}
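+
+// For illustration, a requestServer typically reports the outcome of a request
+// through its event callback as
+//
+//	eventCallback(Event{Type: EvResponse, Data: RequestResponse{ID: id, Request: req, Response: resp}})
+//
+// or with EvFail and a nil Response if the request could not be served.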
+
+// serverWithTimeout wraps a requestServer and introduces timeouts.
+// The request's lifecycle is concluded when EvResponse or EvFail is emitted by
+// the parent requestServer. If this does not happen within softRequestTimeout then
+// EvTimeout is emitted, after which the final EvResponse or EvFail is still
+// guaranteed to follow.
+// If the parent fails to send this final event within hardRequestTimeout then
+// serverWithTimeout emits EvFail and discards any further events from the
+// parent related to the given request.
+type serverWithTimeout struct {
+ parent requestServer
+ lock sync.Mutex
+ clock mclock.Clock
+ childEventCb func(event Event)
+ timeouts map[ID]mclock.Timer
+ lastID ID
+}
+
+// init initializes serverWithTimeout
+func (s *serverWithTimeout) init(clock mclock.Clock) {
+ s.clock = clock
+ s.timeouts = make(map[ID]mclock.Timer)
+}
+
+// subscribe subscribes to events which include parent (requestServer) events
+// plus EvTimeout.
+func (s *serverWithTimeout) subscribe(eventCallback func(event Event)) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.childEventCb = eventCallback
+ s.parent.Subscribe(s.eventCallback)
+}
+
+// sendRequest generates a new request ID, sets up the timeout timer, then sends
+// the request through the parent (requestServer).
+func (s *serverWithTimeout) sendRequest(request Request) (reqId ID) {
+ s.lock.Lock()
+ s.lastID++
+ id := s.lastID
+ s.startTimeout(RequestResponse{ID: id, Request: request})
+ s.lock.Unlock()
+ s.parent.SendRequest(id, request)
+ return id
+}
+
+// eventCallback is called by parent (requestServer) event subscription.
+func (s *serverWithTimeout) eventCallback(event Event) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ switch event.Type {
+ case EvResponse, EvFail:
+ id := event.Data.(RequestResponse).ID
+ if timer, ok := s.timeouts[id]; ok {
+ // Note: if stopping the timer is unsuccessful then the resulting AfterFunc
+ // call will just do nothing
+ timer.Stop()
+ delete(s.timeouts, id)
+ s.childEventCb(event)
+ }
+ default:
+ s.childEventCb(event)
+ }
+}
+
+// startTimeout starts a timeout timer for the given request.
+func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
+ id := reqData.ID
+ s.timeouts[id] = s.clock.AfterFunc(softRequestTimeout, func() {
+ s.lock.Lock()
+ if _, ok := s.timeouts[id]; !ok {
+ s.lock.Unlock()
+ return
+ }
+ s.timeouts[id] = s.clock.AfterFunc(hardRequestTimeout-softRequestTimeout, func() {
+ s.lock.Lock()
+ if _, ok := s.timeouts[id]; !ok {
+ s.lock.Unlock()
+ return
+ }
+ delete(s.timeouts, id)
+ childEventCb := s.childEventCb
+ s.lock.Unlock()
+ childEventCb(Event{Type: EvFail, Data: reqData})
+ })
+ childEventCb := s.childEventCb
+ s.lock.Unlock()
+ childEventCb(Event{Type: EvTimeout, Data: reqData})
+ })
+}
+
+// unsubscribe stops all pending timeout timers and unsubscribes from the parent
+// (requestServer).
+func (s *serverWithTimeout) unsubscribe() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ for _, timer := range s.timeouts {
+ if timer != nil {
+ timer.Stop()
+ }
+ }
+ s.childEventCb = nil
+ s.parent.Unsubscribe()
+}
+
+// serverWithLimits wraps serverWithTimeout and implements server. It limits the
+// number of parallel in-flight requests and prevents sending new requests when a
+// pending one has already timed out. Server events are also rate limited.
+// It also implements a failure delay mechanism that adds an exponentially growing
+// delay each time a request fails (wrong answer or hard timeout). This makes the
+// syncing mechanism less brittle as temporary failures of the server might happen
+// sometimes, but still avoids hammering a non-functional server with requests.
+type serverWithLimits struct {
+ serverWithTimeout
+ lock sync.Mutex
+ childEventCb func(event Event)
+ softTimeouts map[ID]struct{}
+ pendingCount, timeoutCount int
+ parallelLimit float32
+ sendEvent bool
+ delayTimer mclock.Timer
+ delayCounter int
+ failureDelayEnd mclock.AbsTime
+ failureDelay float64
+ serverEventBuffer int
+ eventBufferUpdated mclock.AbsTime
+}
+
+// init initializes serverWithLimits
+func (s *serverWithLimits) init() {
+ s.softTimeouts = make(map[ID]struct{})
+ s.parallelLimit = defaultParallelLimit
+ s.serverEventBuffer = maxServerEventBuffer
+}
+
+// subscribe subscribes to events which include parent (serverWithTimeout) events
+// plus EvCanRequestAgain.
+func (s *serverWithLimits) subscribe(eventCallback func(event Event)) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.childEventCb = eventCallback
+ s.serverWithTimeout.subscribe(s.eventCallback)
+}
+
+// eventCallback is called by parent (serverWithTimeout) event subscription.
+func (s *serverWithLimits) eventCallback(event Event) {
+ s.lock.Lock()
+ var sendCanRequestAgain bool
+ passEvent := true
+ switch event.Type {
+ case EvTimeout:
+ id := event.Data.(RequestResponse).ID
+ s.softTimeouts[id] = struct{}{}
+ s.timeoutCount++
+ s.parallelLimit -= parallelAdjustDown
+ if s.parallelLimit < minParallelLimit {
+ s.parallelLimit = minParallelLimit
+ }
+ log.Debug("Server timeout", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
+ case EvResponse, EvFail:
+ id := event.Data.(RequestResponse).ID
+ if _, ok := s.softTimeouts[id]; ok {
+ delete(s.softTimeouts, id)
+ s.timeoutCount--
+ log.Debug("Server timeout finalized", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
+ }
+ if event.Type == EvResponse && s.pendingCount >= int(s.parallelLimit) {
+ s.parallelLimit += parallelAdjustUp
+ }
+ s.pendingCount--
+ if s.canRequest() {
+ sendCanRequestAgain = s.sendEvent
+ s.sendEvent = false
+ }
+ if event.Type == EvFail {
+ s.failLocked("failed request")
+ }
+ default:
+ // server event; check rate limit
+ if s.serverEventBuffer < maxServerEventBuffer {
+ now := s.clock.Now()
+ sinceUpdate := time.Duration(now - s.eventBufferUpdated)
+ if sinceUpdate >= maxServerEventRate*time.Duration(maxServerEventBuffer-s.serverEventBuffer) {
+ s.serverEventBuffer = maxServerEventBuffer
+ s.eventBufferUpdated = now
+ } else {
+ addBuffer := int(sinceUpdate / maxServerEventRate)
+ s.serverEventBuffer += addBuffer
+ s.eventBufferUpdated += mclock.AbsTime(maxServerEventRate * time.Duration(addBuffer))
+ }
+ }
+ if s.serverEventBuffer > 0 {
+ s.serverEventBuffer--
+ } else {
+ passEvent = false
+ }
+ }
+ childEventCb := s.childEventCb
+ s.lock.Unlock()
+ if passEvent {
+ childEventCb(event)
+ }
+ if sendCanRequestAgain {
+ childEventCb(Event{Type: EvCanRequestAgain})
+ }
+}
+
+// sendRequest sends a request through the parent (serverWithTimeout).
+func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
+ s.lock.Lock()
+ s.pendingCount++
+ s.lock.Unlock()
+ return s.serverWithTimeout.sendRequest(request)
+}
+
+// unsubscribe stops the failure delay timer and unsubscribes from the parent
+// (serverWithTimeout).
+func (s *serverWithLimits) unsubscribe() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ if s.delayTimer != nil {
+ s.delayTimer.Stop()
+ s.delayTimer = nil
+ }
+ s.childEventCb = nil
+ s.serverWithTimeout.unsubscribe()
+}
+
+// canRequest checks whether a new request can be started.
+func (s *serverWithLimits) canRequest() bool {
+ if s.delayTimer != nil || s.pendingCount >= int(s.parallelLimit) || s.timeoutCount > 0 {
+ return false
+ }
+ if s.parallelLimit < minParallelLimit {
+ s.parallelLimit = minParallelLimit
+ }
+ return true
+}
+
+// canRequestNow checks whether a new request can be started, according to the
+// current in-flight request count and parallelLimit, and also the failure delay
+// timer.
+// If it returns false then it is guaranteed that an EvCanRequestAgain will be
+// sent whenever the server becomes available for requesting again.
+func (s *serverWithLimits) canRequestNow() bool {
+ var sendCanRequestAgain bool
+ s.lock.Lock()
+ canRequest := s.canRequest()
+ if canRequest {
+ sendCanRequestAgain = s.sendEvent
+ s.sendEvent = false
+ }
+ childEventCb := s.childEventCb
+ s.lock.Unlock()
+ if sendCanRequestAgain {
+ childEventCb(Event{Type: EvCanRequestAgain})
+ }
+ return canRequest
+}
+
+// delay sets the delay timer to the given duration, disabling new requests for
+// the given period.
+func (s *serverWithLimits) delay(delay time.Duration) {
+ if s.delayTimer != nil {
+ // Note: if stopping the timer is unsuccessful then the resulting AfterFunc
+ // call will just do nothing
+ s.delayTimer.Stop()
+ s.delayTimer = nil
+ }
+
+ s.delayCounter++
+ delayCounter := s.delayCounter
+ log.Debug("Server delay started", "length", delay)
+ s.delayTimer = s.clock.AfterFunc(delay, func() {
+ log.Debug("Server delay ended", "length", delay)
+ var sendCanRequestAgain bool
+ s.lock.Lock()
+ if s.delayTimer != nil && s.delayCounter == delayCounter { // do nothing if there is a new timer now
+ s.delayTimer = nil
+ if s.canRequest() {
+ sendCanRequestAgain = s.sendEvent
+ s.sendEvent = false
+ }
+ }
+ childEventCb := s.childEventCb
+ s.lock.Unlock()
+ if sendCanRequestAgain {
+ childEventCb(Event{Type: EvCanRequestAgain})
+ }
+ })
+}
+
+// fail reports that a response from the server was found invalid by the processing
+// Module, disabling new requests for a dynamically adjusted time period.
+func (s *serverWithLimits) fail(desc string) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ s.failLocked(desc)
+}
+
+// failLocked calculates the dynamic failure delay and applies it.
+func (s *serverWithLimits) failLocked(desc string) {
+ log.Debug("Server error", "description", desc)
+ s.failureDelay *= 2
+ now := s.clock.Now()
+ if now > s.failureDelayEnd {
+ s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
+ }
+ if s.failureDelay < float64(minFailureDelay) {
+ s.failureDelay = float64(minFailureDelay)
+ }
+ s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
+ s.delay(time.Duration(s.failureDelay))
+}
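+
+// For illustration: starting from an idle server, the first failure disables new
+// requests for minFailureDelay (100ms), a quick second failure for roughly 200ms,
+// then 400ms and so on, while the 2^(-dt/maxFailureDelay) term above shrinks the
+// delay back towards the minimum after a sufficiently long failure-free period.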
diff --git a/beacon/light/request/server_test.go b/beacon/light/request/server_test.go
new file mode 100644
index 0000000000..b6b9edf9a0
--- /dev/null
+++ b/beacon/light/request/server_test.go
@@ -0,0 +1,158 @@
+package request
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
+)
+
+const (
+ testRequest = "Life, the Universe, and Everything"
+ testResponse = 42
+)
+
+var testEventType = &EventType{Name: "testEvent"}
+
+func TestServerEvents(t *testing.T) {
+ rs := &testRequestServer{}
+ clock := &mclock.Simulated{}
+ srv := NewServer(rs, clock)
+ var lastEventType *EventType
+ srv.subscribe(func(event Event) { lastEventType = event.Type })
+ evTypeName := func(evType *EventType) string {
+ if evType == nil {
+ return "none"
+ }
+ return evType.Name
+ }
+ expEvent := func(expType *EventType) {
+ if lastEventType != expType {
+ t.Errorf("Wrong event type (expected %s, got %s)", evTypeName(expType), evTypeName(lastEventType))
+ }
+ lastEventType = nil
+ }
+ // user events should simply be passed through
+ rs.eventCb(Event{Type: testEventType})
+ expEvent(testEventType)
+ // send request, soft timeout, then valid response
+ srv.sendRequest(testRequest)
+ clock.WaitForTimers(1)
+ clock.Run(softRequestTimeout)
+ expEvent(EvTimeout)
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
+ expEvent(EvResponse)
+ // send request, hard timeout (response after hard timeout should be ignored)
+ srv.sendRequest(testRequest)
+ clock.WaitForTimers(1)
+ clock.Run(softRequestTimeout)
+ expEvent(EvTimeout)
+ clock.WaitForTimers(1)
+ clock.Run(hardRequestTimeout)
+ expEvent(EvFail)
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
+ expEvent(nil)
+}
+
+func TestServerParallel(t *testing.T) {
+ rs := &testRequestServer{}
+ srv := NewServer(rs, &mclock.Simulated{})
+ srv.subscribe(func(event Event) {})
+
+ expSend := func(expSent int) {
+ var sent int
+ for sent <= expSent {
+ if !srv.canRequestNow() {
+ break
+ }
+ sent++
+ srv.sendRequest(testRequest)
+ }
+ if sent != expSent {
+ t.Errorf("Wrong number of parallel requests accepted (expected %d, got %d)", expSent, sent)
+ }
+ }
+ // max out parallel allowance
+ expSend(defaultParallelLimit)
+ // 1 answered, should accept 1 more
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
+ expSend(1)
+ // 2 answered, should accept 2 more
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 2, Request: testRequest, Response: testResponse}})
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 3, Request: testRequest, Response: testResponse}})
+ expSend(2)
+ // failed request, should decrease allowance and not accept more
+ rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 4, Request: testRequest}})
+ expSend(0)
+ srv.unsubscribe()
+}
+
+func TestServerFail(t *testing.T) {
+ rs := &testRequestServer{}
+ clock := &mclock.Simulated{}
+ srv := NewServer(rs, clock)
+ srv.subscribe(func(event Event) {})
+ expCanRequest := func(expCanRequest bool) {
+ if canRequest := srv.canRequestNow(); canRequest != expCanRequest {
+ t.Errorf("Wrong result for canRequestNow (expected %v, got %v)", expCanRequest, canRequest)
+ }
+ }
+ // timed out request
+ expCanRequest(true)
+ srv.sendRequest(testRequest)
+ clock.WaitForTimers(1)
+ expCanRequest(true)
+ clock.Run(softRequestTimeout)
+ expCanRequest(false) // cannot request when there is a timed out request
+ rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
+ expCanRequest(true)
+ // explicit server.Fail
+ srv.fail("")
+ clock.WaitForTimers(1)
+ expCanRequest(false) // cannot request for a while after a failure
+ clock.Run(minFailureDelay)
+ expCanRequest(true)
+ // request returned with EvFail
+ srv.sendRequest(testRequest)
+ rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 2, Request: testRequest}})
+ clock.WaitForTimers(1)
+ expCanRequest(false) // EvFail should also start failure delay
+ clock.Run(minFailureDelay)
+ expCanRequest(false) // second failure delay is longer, should still be disabled
+ clock.Run(minFailureDelay)
+ expCanRequest(true)
+ srv.unsubscribe()
+}
+
+func TestServerEventRateLimit(t *testing.T) {
+ rs := &testRequestServer{}
+ clock := &mclock.Simulated{}
+ srv := NewServer(rs, clock)
+ var eventCount int
+ srv.subscribe(func(event Event) {
+ if !event.IsRequestEvent() {
+ eventCount++
+ }
+ })
+ expEvents := func(send, expAllowed int) {
+ eventCount = 0
+ for sent := 0; sent < send; sent++ {
+ rs.eventCb(Event{Type: testEventType})
+ }
+ if eventCount != expAllowed {
+ t.Errorf("Wrong number of server events passing rate limitation (sent %d, expected %d, got %d)", send, expAllowed, eventCount)
+ }
+ }
+ expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
+ clock.Run(maxServerEventRate)
+ expEvents(5, 1)
+ clock.Run(maxServerEventRate * maxServerEventBuffer * 2)
+ expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
+}
+
+type testRequestServer struct {
+ eventCb func(Event)
+}
+
+func (rs *testRequestServer) Subscribe(eventCb func(Event)) { rs.eventCb = eventCb }
+func (rs *testRequestServer) SendRequest(ID, Request) {}
+func (rs *testRequestServer) Unsubscribe() {}
diff --git a/beacon/light/sync/head_sync.go b/beacon/light/sync/head_sync.go
new file mode 100644
index 0000000000..9fef95b0df
--- /dev/null
+++ b/beacon/light/sync/head_sync.go
@@ -0,0 +1,176 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/types"
+)
+
+type headTracker interface {
+ ValidateHead(head types.SignedHeader) (bool, error)
+ ValidateFinality(head types.FinalityUpdate) (bool, error)
+ SetPrefetchHead(head types.HeadInfo)
+}
+
+// HeadSync implements request.Module; it updates the validated and prefetch
+// heads of HeadTracker based on the EvNewHead, EvNewSignedHead and
+// EvNewFinalityUpdate events coming from registered servers.
+// It can also postpone the validation of the latest announced signed head
+// until the committee chain is synced up to at least the required period.
+type HeadSync struct {
+ headTracker headTracker
+ chain committeeChain
+ nextSyncPeriod uint64
+ chainInit bool
+ unvalidatedHeads map[request.Server]types.SignedHeader
+ unvalidatedFinality map[request.Server]types.FinalityUpdate
+ serverHeads map[request.Server]types.HeadInfo
+ headServerCount map[types.HeadInfo]headServerCount
+ headCounter uint64
+ prefetchHead types.HeadInfo
+}
+
+// headServerCount is associated with the most recently seen head infos; it stores
+// the number of servers currently announcing the given head info as their head,
+// together with a counter signaling how recently that head was first seen.
+// This data is used for selecting the prefetch head.
+type headServerCount struct {
+ serverCount int
+ headCounter uint64
+}
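+
+// For illustration: if two servers currently announce head A and one server
+// announces head B, A is selected as prefetch head even if B was announced more
+// recently; with equal server counts the head with the higher headCounter (the
+// one that first appeared most recently) wins.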
+
+// NewHeadSync creates a new HeadSync.
+func NewHeadSync(headTracker headTracker, chain committeeChain) *HeadSync {
+ s := &HeadSync{
+ headTracker: headTracker,
+ chain: chain,
+ unvalidatedHeads: make(map[request.Server]types.SignedHeader),
+ unvalidatedFinality: make(map[request.Server]types.FinalityUpdate),
+ serverHeads: make(map[request.Server]types.HeadInfo),
+ headServerCount: make(map[types.HeadInfo]headServerCount),
+ }
+ return s
+}
+
+// Process implements request.Module.
+func (s *HeadSync) Process(requester request.Requester, events []request.Event) {
+ for _, event := range events {
+ switch event.Type {
+ case EvNewHead:
+ s.setServerHead(event.Server, event.Data.(types.HeadInfo))
+ case EvNewSignedHead:
+ s.newSignedHead(event.Server, event.Data.(types.SignedHeader))
+ case EvNewFinalityUpdate:
+ s.newFinalityUpdate(event.Server, event.Data.(types.FinalityUpdate))
+ case request.EvUnregistered:
+ s.setServerHead(event.Server, types.HeadInfo{})
+ delete(s.serverHeads, event.Server)
+ delete(s.unvalidatedHeads, event.Server)
+ }
+ }
+
+ nextPeriod, chainInit := s.chain.NextSyncPeriod()
+ if nextPeriod != s.nextSyncPeriod || chainInit != s.chainInit {
+ s.nextSyncPeriod, s.chainInit = nextPeriod, chainInit
+ s.processUnvalidated()
+ }
+}
+
+// newSignedHead handles a received signed head; it either validates it if the
+// chain is properly synced or stores it for later validation.
+func (s *HeadSync) newSignedHead(server request.Server, signedHead types.SignedHeader) {
+ if !s.chainInit || types.SyncPeriod(signedHead.SignatureSlot) > s.nextSyncPeriod {
+ s.unvalidatedHeads[server] = signedHead
+ return
+ }
+ s.headTracker.ValidateHead(signedHead)
+}
+
+// newFinalityUpdate handles a received finality update; it either validates it
+// if the chain is properly synced or stores it for later validation.
+func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
+ if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
+ s.unvalidatedFinality[server] = finalityUpdate
+ return
+ }
+ s.headTracker.ValidateFinality(finalityUpdate)
+}
+
+// processUnvalidated iterates the stored unvalidated heads and finality updates
+// and validates those that have become validatable.
+func (s *HeadSync) processUnvalidated() {
+ if !s.chainInit {
+ return
+ }
+ for server, signedHead := range s.unvalidatedHeads {
+ if types.SyncPeriod(signedHead.SignatureSlot) <= s.nextSyncPeriod {
+ s.headTracker.ValidateHead(signedHead)
+ delete(s.unvalidatedHeads, server)
+ }
+ }
+ for server, finalityUpdate := range s.unvalidatedFinality {
+ if types.SyncPeriod(finalityUpdate.SignatureSlot) <= s.nextSyncPeriod {
+ s.headTracker.ValidateFinality(finalityUpdate)
+ delete(s.unvalidatedFinality, server)
+ }
+ }
+}
+
+// setServerHead processes non-validated server head announcements and updates
+// the prefetch head if necessary.
+func (s *HeadSync) setServerHead(server request.Server, head types.HeadInfo) bool {
+ if oldHead, ok := s.serverHeads[server]; ok {
+ if head == oldHead {
+ return false
+ }
+ h := s.headServerCount[oldHead]
+ if h.serverCount--; h.serverCount > 0 {
+ s.headServerCount[oldHead] = h
+ } else {
+ delete(s.headServerCount, oldHead)
+ }
+ }
+ if head != (types.HeadInfo{}) {
+ h, ok := s.headServerCount[head]
+ if !ok {
+ s.headCounter++
+ h.headCounter = s.headCounter
+ }
+ h.serverCount++
+ s.headServerCount[head] = h
+ s.serverHeads[server] = head
+ } else {
+ delete(s.serverHeads, server)
+ }
+ var (
+ bestHead types.HeadInfo
+ bestHeadInfo headServerCount
+ )
+ for head, headServerCount := range s.headServerCount {
+ if headServerCount.serverCount > bestHeadInfo.serverCount ||
+ (headServerCount.serverCount == bestHeadInfo.serverCount && headServerCount.headCounter > bestHeadInfo.headCounter) {
+ bestHead, bestHeadInfo = head, headServerCount
+ }
+ }
+ if bestHead == s.prefetchHead {
+ return false
+ }
+ s.prefetchHead = bestHead
+ s.headTracker.SetPrefetchHead(bestHead)
+ return true
+}
diff --git a/beacon/light/sync/head_sync_test.go b/beacon/light/sync/head_sync_test.go
new file mode 100644
index 0000000000..12faad6292
--- /dev/null
+++ b/beacon/light/sync/head_sync_test.go
@@ -0,0 +1,151 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ testServer1 = "testServer1"
+ testServer2 = "testServer2"
+ testServer3 = "testServer3"
+ testServer4 = "testServer4"
+
+ testHead0 = types.HeadInfo{}
+ testHead1 = types.HeadInfo{Slot: 123, BlockRoot: common.Hash{1}}
+ testHead2 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{2}}
+ testHead3 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{3}}
+ testHead4 = types.HeadInfo{Slot: 125, BlockRoot: common.Hash{4}}
+
+ testSHead1 = types.SignedHeader{SignatureSlot: 0x0124, Header: types.Header{Slot: 0x0123, StateRoot: common.Hash{1}}}
+ testSHead2 = types.SignedHeader{SignatureSlot: 0x2010, Header: types.Header{Slot: 0x200e, StateRoot: common.Hash{2}}}
+ // testSHead3 is at the end of period 1 but signed in period 2
+ testSHead3 = types.SignedHeader{SignatureSlot: 0x4000, Header: types.Header{Slot: 0x3fff, StateRoot: common.Hash{3}}}
+ testSHead4 = types.SignedHeader{SignatureSlot: 0x6444, Header: types.Header{Slot: 0x6443, StateRoot: common.Hash{4}}}
+)
+
+func TestValidatedHead(t *testing.T) {
+ chain := &TestCommitteeChain{}
+ ht := &TestHeadTracker{}
+ headSync := NewHeadSync(ht, chain)
+ ts := NewTestScheduler(t, headSync)
+
+ ht.ExpValidated(t, 0, nil)
+
+ ts.AddServer(testServer1, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer1, testSHead1)
+ ts.Run(1)
+ // announced head should be queued because of uninitialized chain
+ ht.ExpValidated(t, 1, nil)
+
+ chain.SetNextSyncPeriod(0) // initialize chain
+ ts.Run(2)
+ // expect previously queued head to be validated
+ ht.ExpValidated(t, 2, []types.SignedHeader{testSHead1})
+
+ chain.SetNextSyncPeriod(1)
+ ts.ServerEvent(EvNewSignedHead, testServer1, testSHead2)
+ ts.AddServer(testServer2, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer2, testSHead2)
+ ts.Run(3)
+ // expect both head announcements to be validated instantly
+ ht.ExpValidated(t, 3, []types.SignedHeader{testSHead2, testSHead2})
+
+ ts.ServerEvent(EvNewSignedHead, testServer1, testSHead3)
+ ts.AddServer(testServer3, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer3, testSHead4)
+ ts.Run(4)
+ // heads announced for a future period should be queued
+ ht.ExpValidated(t, 4, nil)
+
+ chain.SetNextSyncPeriod(2)
+ ts.Run(5)
+ // testSHead3 can be validated now but not testSHead4
+ ht.ExpValidated(t, 5, []types.SignedHeader{testSHead3})
+
+ // server 3 is unregistered before period 3 is synced, so its queued head should be dropped
+ ts.RemoveServer(testServer3)
+ ts.Run(6)
+ ht.ExpValidated(t, 6, nil)
+
+ chain.SetNextSyncPeriod(3)
+ ts.Run(7)
+ // testSHead4 could be validated now but it's not queued by any registered server
+ ht.ExpValidated(t, 7, nil)
+
+ ts.ServerEvent(EvNewSignedHead, testServer2, testSHead4)
+ ts.Run(8)
+ // now testSHead4 should be validated
+ ht.ExpValidated(t, 8, []types.SignedHeader{testSHead4})
+}
+
+func TestPrefetchHead(t *testing.T) {
+ chain := &TestCommitteeChain{}
+ ht := &TestHeadTracker{}
+ headSync := NewHeadSync(ht, chain)
+ ts := NewTestScheduler(t, headSync)
+
+ ht.ExpPrefetch(t, 0, testHead0) // no servers registered
+
+ ts.AddServer(testServer1, 1)
+ ts.ServerEvent(EvNewHead, testServer1, testHead1)
+ ts.Run(1)
+ ht.ExpPrefetch(t, 1, testHead1) // s1: h1
+
+ ts.AddServer(testServer2, 1)
+ ts.ServerEvent(EvNewHead, testServer2, testHead2)
+ ts.Run(2)
+ ht.ExpPrefetch(t, 2, testHead2) // s1: h1, s2: h2
+
+ ts.ServerEvent(EvNewHead, testServer1, testHead2)
+ ts.Run(3)
+ ht.ExpPrefetch(t, 3, testHead2) // s1: h2, s2: h2
+
+ ts.AddServer(testServer3, 1)
+ ts.ServerEvent(EvNewHead, testServer3, testHead3)
+ ts.Run(4)
+ ht.ExpPrefetch(t, 4, testHead2) // s1: h2, s2: h2, s3: h3
+
+ ts.AddServer(testServer4, 1)
+ ts.ServerEvent(EvNewHead, testServer4, testHead4)
+ ts.Run(5)
+ ht.ExpPrefetch(t, 5, testHead2) // s1: h2, s2: h2, s3: h3, s4: h4
+
+ ts.ServerEvent(EvNewHead, testServer2, testHead3)
+ ts.Run(6)
+ ht.ExpPrefetch(t, 6, testHead3) // s1: h2, s2: h3, s3: h3, s4: h4
+
+ ts.RemoveServer(testServer3)
+ ts.Run(7)
+ ht.ExpPrefetch(t, 7, testHead4) // s1: h2, s2: h3, s4: h4
+
+ ts.RemoveServer(testServer1)
+ ts.Run(8)
+ ht.ExpPrefetch(t, 8, testHead4) // s2: h3, s4: h4
+
+ ts.RemoveServer(testServer4)
+ ts.Run(9)
+ ht.ExpPrefetch(t, 9, testHead3) // s2: h3
+
+ ts.RemoveServer(testServer2)
+ ts.Run(10)
+ ht.ExpPrefetch(t, 10, testHead0) // no servers registered
+}
diff --git a/beacon/light/sync/test_helpers.go b/beacon/light/sync/test_helpers.go
new file mode 100644
index 0000000000..a1ca2b5909
--- /dev/null
+++ b/beacon/light/sync/test_helpers.go
@@ -0,0 +1,254 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/beacon/light"
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/types"
+)
+
+type requestWithID struct {
+ sid request.ServerAndID
+ request request.Request
+}
+
+type TestScheduler struct {
+ t *testing.T
+ module request.Module
+ events []request.Event
+ servers []request.Server
+ allowance map[request.Server]int
+ sent map[int][]requestWithID
+ testIndex int
+ expFail map[request.Server]int // expected Server.Fail calls during next Run
+ lastId request.ID
+}
+
+func NewTestScheduler(t *testing.T, module request.Module) *TestScheduler {
+ return &TestScheduler{
+ t: t,
+ module: module,
+ allowance: make(map[request.Server]int),
+ expFail: make(map[request.Server]int),
+ sent: make(map[int][]requestWithID),
+ }
+}
+
+func (ts *TestScheduler) Run(testIndex int, exp ...any) {
+ expReqs := make([]requestWithID, len(exp)/2)
+ id := ts.lastId
+ for i := range expReqs {
+ id++
+ expReqs[i] = requestWithID{
+ sid: request.ServerAndID{Server: exp[i*2].(request.Server), ID: id},
+ request: exp[i*2+1].(request.Request),
+ }
+ }
+ if len(expReqs) == 0 {
+ expReqs = nil
+ }
+
+ ts.testIndex = testIndex
+ ts.module.Process(ts, ts.events)
+ ts.events = nil
+
+ for server, count := range ts.expFail {
+ delete(ts.expFail, server)
+ if count == 0 {
+ continue
+ }
+ ts.t.Errorf("Missing %d Server.Fail(s) from server %s in test case #%d", count, server.(string), testIndex)
+ }
+
+ if !reflect.DeepEqual(ts.sent[testIndex], expReqs) {
+ ts.t.Errorf("Wrong sent requests in test case #%d (expected %v, got %v)", testIndex, expReqs, ts.sent[testIndex])
+ }
+}
+
+func (ts *TestScheduler) CanSendTo() (cs []request.Server) {
+ for _, server := range ts.servers {
+ if ts.allowance[server] > 0 {
+ cs = append(cs, server)
+ }
+ }
+ return
+}
+
+func (ts *TestScheduler) Send(server request.Server, req request.Request) request.ID {
+ ts.lastId++
+ ts.sent[ts.testIndex] = append(ts.sent[ts.testIndex], requestWithID{
+ sid: request.ServerAndID{Server: server, ID: ts.lastId},
+ request: req,
+ })
+ ts.allowance[server]--
+ return ts.lastId
+}
+
+func (ts *TestScheduler) Fail(server request.Server, desc string) {
+ if ts.expFail[server] == 0 {
+ ts.t.Errorf("Unexpected Fail from server %s in test case #%d: %s", server.(string), ts.testIndex, desc)
+ return
+ }
+ ts.expFail[server]--
+}
+
+func (ts *TestScheduler) Request(testIndex, reqIndex int) requestWithID {
+ if len(ts.sent[testIndex]) < reqIndex {
+ ts.t.Errorf("Missing request from test case %d index %d", testIndex, reqIndex)
+ return requestWithID{}
+ }
+ return ts.sent[testIndex][reqIndex-1]
+}
+
+func (ts *TestScheduler) ServerEvent(evType *request.EventType, server request.Server, data any) {
+ ts.events = append(ts.events, request.Event{
+ Type: evType,
+ Server: server,
+ Data: data,
+ })
+}
+
+func (ts *TestScheduler) RequestEvent(evType *request.EventType, req requestWithID, resp request.Response) {
+ if req.request == nil {
+ return
+ }
+ ts.events = append(ts.events, request.Event{
+ Type: evType,
+ Server: req.sid.Server,
+ Data: request.RequestResponse{
+ ID: req.sid.ID,
+ Request: req.request,
+ Response: resp,
+ },
+ })
+}
+
+func (ts *TestScheduler) AddServer(server request.Server, allowance int) {
+ ts.servers = append(ts.servers, server)
+ ts.allowance[server] = allowance
+ ts.ServerEvent(request.EvRegistered, server, nil)
+}
+
+func (ts *TestScheduler) RemoveServer(server request.Server) {
+ for i, s := range ts.servers {
+ if s == server {
+ copy(ts.servers[i:len(ts.servers)-1], ts.servers[i+1:])
+ ts.servers = ts.servers[:len(ts.servers)-1]
+ break
+ }
+ }
+ delete(ts.allowance, server)
+ ts.ServerEvent(request.EvUnregistered, server, nil)
+}
+
+func (ts *TestScheduler) AddAllowance(server request.Server, allowance int) {
+ ts.allowance[server] += allowance
+}
+
+func (ts *TestScheduler) ExpFail(server request.Server) {
+ ts.expFail[server]++
+}
+
+type TestCommitteeChain struct {
+ fsp, nsp uint64
+ init bool
+}
+
+func (t *TestCommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error {
+ t.fsp, t.nsp, t.init = bootstrap.Header.SyncPeriod(), bootstrap.Header.SyncPeriod()+2, true
+ return nil
+}
+
+func (t *TestCommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error {
+ period := update.AttestedHeader.Header.SyncPeriod()
+ if period < t.fsp || period > t.nsp || !t.init {
+ return light.ErrInvalidPeriod
+ }
+ if period == t.nsp {
+ t.nsp++
+ }
+ return nil
+}
+
+func (t *TestCommitteeChain) NextSyncPeriod() (uint64, bool) {
+ return t.nsp, t.init
+}
+
+func (tc *TestCommitteeChain) ExpInit(t *testing.T, ExpInit bool) {
+ if tc.init != ExpInit {
+ t.Errorf("Incorrect init flag (expected %v, got %v)", ExpInit, tc.init)
+ }
+}
+
+func (t *TestCommitteeChain) SetNextSyncPeriod(nsp uint64) {
+ t.init, t.nsp = true, nsp
+}
+
+func (tc *TestCommitteeChain) ExpNextSyncPeriod(t *testing.T, expNsp uint64) {
+ tc.ExpInit(t, true)
+ if tc.nsp != expNsp {
+ t.Errorf("Incorrect NextSyncPeriod (expected %d, got %d)", expNsp, tc.nsp)
+ }
+}
+
+type TestHeadTracker struct {
+ phead types.HeadInfo
+ validated []types.SignedHeader
+}
+
+func (ht *TestHeadTracker) ValidateHead(head types.SignedHeader) (bool, error) {
+ ht.validated = append(ht.validated, head)
+ return true, nil
+}
+
+// TODO add test case for finality
+func (ht *TestHeadTracker) ValidateFinality(head types.FinalityUpdate) (bool, error) {
+ return true, nil
+}
+
+func (ht *TestHeadTracker) ExpValidated(t *testing.T, tci int, expHeads []types.SignedHeader) {
+ for i, expHead := range expHeads {
+ if i >= len(ht.validated) {
+ t.Errorf("Missing validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got none)", tci, i, expHead.Header.Slot, expHead.Header.Hash())
+ continue
+ }
+ if ht.validated[i] != expHead {
+ vhead := ht.validated[i].Header
+ t.Errorf("Wrong validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, i, expHead.Header.Slot, expHead.Header.Hash(), vhead.Slot, vhead.Hash())
+ }
+ }
+ for i := len(expHeads); i < len(ht.validated); i++ {
+ vhead := ht.validated[i].Header
+ t.Errorf("Unexpected validated head in test case #%d index #%d (expected none, got {slot %d blockRoot %x})", tci, i, vhead.Slot, vhead.Hash())
+ }
+ ht.validated = nil
+}
+
+func (ht *TestHeadTracker) SetPrefetchHead(head types.HeadInfo) {
+ ht.phead = head
+}
+
+func (ht *TestHeadTracker) ExpPrefetch(t *testing.T, tci int, exp types.HeadInfo) {
+ if ht.phead != exp {
+ t.Errorf("Wrong prefetch head in test case #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, exp.Slot, exp.BlockRoot, ht.phead.Slot, ht.phead.BlockRoot)
+ }
+}
diff --git a/beacon/light/sync/types.go b/beacon/light/sync/types.go
new file mode 100644
index 0000000000..6449ae842d
--- /dev/null
+++ b/beacon/light/sync/types.go
@@ -0,0 +1,42 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+)
+
+var (
+ EvNewHead = &request.EventType{Name: "newHead"} // data: types.HeadInfo
+ EvNewSignedHead = &request.EventType{Name: "newSignedHead"} // data: types.SignedHeader
+ EvNewFinalityUpdate = &request.EventType{Name: "newFinalityUpdate"} // data: types.FinalityUpdate
+)
+
+type (
+ ReqUpdates struct {
+ FirstPeriod, Count uint64
+ }
+ RespUpdates struct {
+ Updates []*types.LightClientUpdate
+ Committees []*types.SerializedSyncCommittee
+ }
+ ReqHeader common.Hash
+ ReqCheckpointData common.Hash
+ ReqBeaconBlock common.Hash
+)
diff --git a/beacon/light/sync/update_sync.go b/beacon/light/sync/update_sync.go
new file mode 100644
index 0000000000..533e470fb0
--- /dev/null
+++ b/beacon/light/sync/update_sync.go
@@ -0,0 +1,299 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "sort"
+
+ "github.com/ethereum/go-ethereum/beacon/light"
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+const maxUpdateRequest = 8 // maximum number of updates requested in a single request
+
+type committeeChain interface {
+ CheckpointInit(bootstrap types.BootstrapData) error
+ InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error
+ NextSyncPeriod() (uint64, bool)
+}
+
+// CheckpointInit implements request.Module; it fetches the light client bootstrap
+// data belonging to the given checkpoint hash and initializes the committee chain
+// if successful.
+type CheckpointInit struct {
+ chain committeeChain
+ checkpointHash common.Hash
+ locked request.ServerAndID
+ initialized bool
+}
+
+// NewCheckpointInit creates a new CheckpointInit.
+func NewCheckpointInit(chain committeeChain, checkpointHash common.Hash) *CheckpointInit {
+ return &CheckpointInit{
+ chain: chain,
+ checkpointHash: checkpointHash,
+ }
+}
+
+// Process implements request.Module.
+func (s *CheckpointInit) Process(requester request.Requester, events []request.Event) {
+ for _, event := range events {
+ if !event.IsRequestEvent() {
+ continue
+ }
+ sid, req, resp := event.RequestInfo()
+ if s.locked == sid {
+ s.locked = request.ServerAndID{}
+ }
+ if resp != nil {
+ if checkpoint := resp.(*types.BootstrapData); checkpoint.Header.Hash() == common.Hash(req.(ReqCheckpointData)) {
+ s.chain.CheckpointInit(*checkpoint)
+ s.initialized = true
+ return
+ }
+
+ requester.Fail(event.Server, "invalid checkpoint data")
+ }
+ }
+ // start a request if possible
+ if s.initialized || s.locked != (request.ServerAndID{}) {
+ return
+ }
+ cs := requester.CanSendTo()
+ if len(cs) == 0 {
+ return
+ }
+ server := cs[0]
+ id := requester.Send(server, ReqCheckpointData(s.checkpointHash))
+ s.locked = request.ServerAndID{Server: server, ID: id}
+}
+
+// ForwardUpdateSync implements request.Module; it fetches updates between the
+// committee chain head and each server's announced head. Updates are fetched
+// in batches and multiple batches can also be requested in parallel.
+// Out-of-order responses are also handled; if a batch of updates cannot be added
+// to the chain immediately because of a gap, it is remembered until the missing
+// updates arrive and it can be processed.
+type ForwardUpdateSync struct {
+ chain committeeChain
+ rangeLock rangeLock
+ lockedIDs map[request.ServerAndID]struct{}
+ processQueue []updateResponse
+ nextSyncPeriod map[request.Server]uint64
+}
+
+// NewForwardUpdateSync creates a new ForwardUpdateSync.
+func NewForwardUpdateSync(chain committeeChain) *ForwardUpdateSync {
+ return &ForwardUpdateSync{
+ chain: chain,
+ rangeLock: make(rangeLock),
+ lockedIDs: make(map[request.ServerAndID]struct{}),
+ nextSyncPeriod: make(map[request.Server]uint64),
+ }
+}
+
+// rangeLock allows locking sections of an integer space, preventing the syncing
+// mechanism from requesting a section again while a request that has not timed out
+// is still pending for it, or while already fetched but unprocessed data is available.
+type rangeLock map[uint64]int
+
+// lock locks or unlocks the given section, depending on the sign of the add parameter.
+func (r rangeLock) lock(first, count uint64, add int) {
+ for i := first; i < first+count; i++ {
+ if v := r[i] + add; v > 0 {
+ r[i] = v
+ } else {
+ delete(r, i)
+ }
+ }
+}
+
+// firstUnlocked returns the first unlocked section starting at or after start
+// and not longer than maxCount.
+func (r rangeLock) firstUnlocked(start, maxCount uint64) (first, count uint64) {
+ first = start
+ for {
+ if _, ok := r[first]; !ok {
+ break
+ }
+ first++
+ }
+ for {
+ count++
+ if count == maxCount {
+ break
+ }
+ if _, ok := r[first+count]; ok {
+ break
+ }
+ }
+ return
+}
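+
+// Illustrative example: with periods 5 and 6 locked, firstUnlocked(4, 4) returns
+// (4, 1) because the locked period 5 cuts the range short, while firstUnlocked(5, 4)
+// skips the locked periods and returns (7, 4).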
+
+// lockRange locks the range belonging to the given update request, unless the
+// same request has already been locked.
+func (s *ForwardUpdateSync) lockRange(sid request.ServerAndID, req ReqUpdates) {
+ if _, ok := s.lockedIDs[sid]; ok {
+ return
+ }
+ s.lockedIDs[sid] = struct{}{}
+ s.rangeLock.lock(req.FirstPeriod, req.Count, 1)
+}
+
+// unlockRange unlocks the range belonging to the given update request, unless the
+// same request has already been unlocked.
+func (s *ForwardUpdateSync) unlockRange(sid request.ServerAndID, req ReqUpdates) {
+ if _, ok := s.lockedIDs[sid]; !ok {
+ return
+ }
+ delete(s.lockedIDs, sid)
+ s.rangeLock.lock(req.FirstPeriod, req.Count, -1)
+}
+
+// verifyRange returns true if the number of updates and the individual update
+// periods in the response match the requested section.
+func (s *ForwardUpdateSync) verifyRange(request ReqUpdates, response RespUpdates) bool {
+ if uint64(len(response.Updates)) != request.Count || uint64(len(response.Committees)) != request.Count {
+ return false
+ }
+ for i, update := range response.Updates {
+ if update.AttestedHeader.Header.SyncPeriod() != request.FirstPeriod+uint64(i) {
+ return false
+ }
+ }
+ return true
+}
+
+// updateResponse is a response that has passed initial verification and has been
+// queued for processing. Note that an update response cannot be processed until
+// the previous updates have also been added to the chain.
+type updateResponse struct {
+ sid request.ServerAndID
+ request ReqUpdates
+ response RespUpdates
+}
+
+// updateResponseList implements sort.Interface and sorts update request/response events by FirstPeriod.
+type updateResponseList []updateResponse
+
+func (u updateResponseList) Len() int { return len(u) }
+func (u updateResponseList) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
+func (u updateResponseList) Less(i, j int) bool {
+ return u[i].request.FirstPeriod < u[j].request.FirstPeriod
+}
+
+// Process implements request.Module.
+func (s *ForwardUpdateSync) Process(requester request.Requester, events []request.Event) {
+ for _, event := range events {
+ switch event.Type {
+ case request.EvResponse, request.EvFail, request.EvTimeout:
+ sid, rq, rs := event.RequestInfo()
+ req := rq.(ReqUpdates)
+ var queued bool
+ if event.Type == request.EvResponse {
+ resp := rs.(RespUpdates)
+ if s.verifyRange(req, resp) {
+ // there is a response with a valid format; put it in the process queue
+ s.processQueue = append(s.processQueue, updateResponse{sid: sid, request: req, response: resp})
+ s.lockRange(sid, req)
+ queued = true
+ } else {
+ requester.Fail(event.Server, "invalid update range")
+ }
+ }
+ if !queued {
+ s.unlockRange(sid, req)
+ }
+ case EvNewSignedHead:
+ signedHead := event.Data.(types.SignedHeader)
+ s.nextSyncPeriod[event.Server] = types.SyncPeriod(signedHead.SignatureSlot + 256)
+ case request.EvUnregistered:
+ delete(s.nextSyncPeriod, event.Server)
+ }
+ }
+
+ // try processing ordered list of available responses
+ sort.Sort(updateResponseList(s.processQueue))
+ for s.processQueue != nil {
+ u := s.processQueue[0]
+ if !s.processResponse(requester, u) {
+ break
+ }
+ s.unlockRange(u.sid, u.request)
+ s.processQueue = s.processQueue[1:]
+ if len(s.processQueue) == 0 {
+ s.processQueue = nil
+ }
+ }
+
+ // start new requests if possible
+ startPeriod, chainInit := s.chain.NextSyncPeriod()
+ if !chainInit {
+ return
+ }
+ for {
+ firstPeriod, maxCount := s.rangeLock.firstUnlocked(startPeriod, maxUpdateRequest)
+ var (
+ sendTo request.Server
+ bestCount uint64
+ )
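+ // pick the server whose announced head allows serving the longest range starting
+ // at firstPeriod; servers that cannot serve firstPeriod at all are skipped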
+ for _, server := range requester.CanSendTo() {
+ nextPeriod := s.nextSyncPeriod[server]
+ if nextPeriod <= firstPeriod {
+ continue
+ }
+ count := maxCount
+ if nextPeriod < firstPeriod+maxCount {
+ count = nextPeriod - firstPeriod
+ }
+ if count > bestCount {
+ sendTo, bestCount = server, count
+ }
+ }
+ if sendTo == nil {
+ return
+ }
+ req := ReqUpdates{FirstPeriod: firstPeriod, Count: bestCount}
+ id := requester.Send(sendTo, req)
+ s.lockRange(request.ServerAndID{Server: sendTo, ID: id}, req)
+ }
+}
+
+// processResponse adds the fetched updates and committees to the committee chain.
+// Returns true in case of full or partial success.
+func (s *ForwardUpdateSync) processResponse(requester request.Requester, u updateResponse) (success bool) {
+ for i, update := range u.response.Updates {
+ if err := s.chain.InsertUpdate(update, u.response.Committees[i]); err != nil {
+ if err == light.ErrInvalidPeriod {
+ // there is a gap in the update periods; stop processing without
+ // failing and try again next time
+ return
+ }
+ if err == light.ErrInvalidUpdate || err == light.ErrWrongCommitteeRoot || err == light.ErrCannotReorg {
+ requester.Fail(u.sid.Server, "invalid update received")
+ } else {
+ log.Error("Unexpected InsertUpdate error", "error", err)
+ }
+ return
+ }
+ success = true
+ }
+ return
+}
diff --git a/beacon/light/sync/update_sync_test.go b/beacon/light/sync/update_sync_test.go
new file mode 100644
index 0000000000..1c4b3d6d76
--- /dev/null
+++ b/beacon/light/sync/update_sync_test.go
@@ -0,0 +1,219 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package sync
+
+import (
+ "testing"
+
+ "github.com/ethereum/go-ethereum/beacon/light/request"
+ "github.com/ethereum/go-ethereum/beacon/types"
+)
+
+func TestCheckpointInit(t *testing.T) {
+ chain := &TestCommitteeChain{}
+ checkpoint := &types.BootstrapData{Header: types.Header{Slot: 0x2000*4 + 0x1000}} // period 4
+ checkpointHash := checkpoint.Header.Hash()
+ chkInit := NewCheckpointInit(chain, checkpointHash)
+ ts := NewTestScheduler(t, chkInit)
+ // add 2 servers
+ ts.AddServer(testServer1, 1)
+ ts.AddServer(testServer2, 1)
+
+ // expect bootstrap request to server 1
+ ts.Run(1, testServer1, ReqCheckpointData(checkpointHash))
+
+ // server 1 times out; expect request to server 2
+ ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
+ ts.Run(2, testServer2, ReqCheckpointData(checkpointHash))
+
+ // invalid response from server 2; expect init state to still be false
+ ts.RequestEvent(request.EvResponse, ts.Request(2, 1), &types.BootstrapData{Header: types.Header{Slot: 123456}})
+ ts.ExpFail(testServer2)
+ ts.Run(3)
+ chain.ExpInit(t, false)
+
+ // server 1 fails (hard timeout)
+ ts.RequestEvent(request.EvFail, ts.Request(1, 1), nil)
+ ts.Run(4)
+ chain.ExpInit(t, false)
+
+ // server 3 is registered; expect bootstrap request to server 3
+ ts.AddServer(testServer3, 1)
+ ts.Run(5, testServer3, ReqCheckpointData(checkpointHash))
+
+ // valid response from server 3; expect chain to be initialized
+ ts.RequestEvent(request.EvResponse, ts.Request(5, 1), checkpoint)
+ ts.Run(6)
+ chain.ExpInit(t, true)
+}
+
+func TestUpdateSyncParallel(t *testing.T) {
+ chain := &TestCommitteeChain{}
+ chain.SetNextSyncPeriod(0)
+ updateSync := NewForwardUpdateSync(chain)
+ ts := NewTestScheduler(t, updateSync)
+ // add 2 servers, head at period 100; allow 3 parallel requests for each
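+ // (a sync period is 0x2000 slots, so slot 0x2000*100 + 0x1000 lies in the middle of period 100)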
+ ts.AddServer(testServer1, 3)
+ ts.ServerEvent(EvNewSignedHead, testServer1, types.SignedHeader{SignatureSlot: 0x2000*100 + 0x1000})
+ ts.AddServer(testServer2, 3)
+ ts.ServerEvent(EvNewSignedHead, testServer2, types.SignedHeader{SignatureSlot: 0x2000*100 + 0x1000})
+
+ // expect 6 requests to be sent
+ ts.Run(1,
+ testServer1, ReqUpdates{FirstPeriod: 0, Count: 8},
+ testServer1, ReqUpdates{FirstPeriod: 8, Count: 8},
+ testServer1, ReqUpdates{FirstPeriod: 16, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 24, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 32, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 40, Count: 8})
+
+ // valid response to request 1; expect 8 periods synced and a new request started
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(ts.Request(1, 1)))
+ ts.AddAllowance(testServer1, 1)
+ ts.Run(2, testServer1, ReqUpdates{FirstPeriod: 48, Count: 8})
+ chain.ExpNextSyncPeriod(t, 8)
+
+ // valid response to requests 4 and 5
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 4), testRespUpdate(ts.Request(1, 4)))
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 5), testRespUpdate(ts.Request(1, 5)))
+ ts.AddAllowance(testServer2, 2)
+ // expect 2 more requests but no sync progress (responses 4 and 5 cannot be added before 2 and 3)
+ ts.Run(3,
+ testServer2, ReqUpdates{FirstPeriod: 56, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 64, Count: 8})
+ chain.ExpNextSyncPeriod(t, 8)
+
+ // soft timeout for requests 2 and 3 (server 1 is overloaded)
+ ts.RequestEvent(request.EvTimeout, ts.Request(1, 2), nil)
+ ts.RequestEvent(request.EvTimeout, ts.Request(1, 3), nil)
+ // no allowance, no more requests
+ ts.Run(4)
+
+ // valid responses to requests 6, 8 and 9
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 6), testRespUpdate(ts.Request(1, 6)))
+ ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
+ ts.RequestEvent(request.EvResponse, ts.Request(3, 2), testRespUpdate(ts.Request(3, 2)))
+ ts.AddAllowance(testServer2, 3)
+ // server 2 can now resend requests 2 and 3 (timed out by server 1) and also send a new one
+ ts.Run(5,
+ testServer2, ReqUpdates{FirstPeriod: 8, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 16, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 72, Count: 8})
+
+ // server 1 finally answers timed out request 2
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 2), testRespUpdate(ts.Request(1, 2)))
+ ts.AddAllowance(testServer1, 1)
+ // expect sync progress and one new request
+ ts.Run(6, testServer1, ReqUpdates{FirstPeriod: 80, Count: 8})
+ chain.ExpNextSyncPeriod(t, 16)
+
+ // server 2 answers requests 10 and 11 (the resends of requests 2 and 3)
+ ts.RequestEvent(request.EvResponse, ts.Request(5, 1), testRespUpdate(ts.Request(5, 1)))
+ ts.RequestEvent(request.EvResponse, ts.Request(5, 2), testRespUpdate(ts.Request(5, 2)))
+ ts.AddAllowance(testServer2, 2)
+ ts.Run(7,
+ testServer2, ReqUpdates{FirstPeriod: 88, Count: 8},
+ testServer2, ReqUpdates{FirstPeriod: 96, Count: 4})
+ // the gap is finally filled; the queued responses up to request 6 can now be processed
+ chain.ExpNextSyncPeriod(t, 48)
+
+ // all remaining requests are answered
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 3), testRespUpdate(ts.Request(1, 3)))
+ ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
+ ts.RequestEvent(request.EvResponse, ts.Request(5, 3), testRespUpdate(ts.Request(5, 3)))
+ ts.RequestEvent(request.EvResponse, ts.Request(6, 1), testRespUpdate(ts.Request(6, 1)))
+ ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
+ ts.RequestEvent(request.EvResponse, ts.Request(7, 2), testRespUpdate(ts.Request(7, 2)))
+ ts.Run(8)
+ // expect chain to be fully synced
+ chain.ExpNextSyncPeriod(t, 100)
+}
+
+func TestUpdateSyncDifferentHeads(t *testing.T) {
+ chain := &TestCommitteeChain{}
+ chain.SetNextSyncPeriod(10)
+ updateSync := NewForwardUpdateSync(chain)
+ ts := NewTestScheduler(t, updateSync)
+ // add 3 servers with different announced head periods
+ ts.AddServer(testServer1, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer1, types.SignedHeader{SignatureSlot: 0x2000*15 + 0x1000})
+ ts.AddServer(testServer2, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer2, types.SignedHeader{SignatureSlot: 0x2000*16 + 0x1000})
+ ts.AddServer(testServer3, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer3, types.SignedHeader{SignatureSlot: 0x2000*17 + 0x1000})
+
+ // expect request to the best announced head
+ ts.Run(1, testServer3, ReqUpdates{FirstPeriod: 10, Count: 7})
+
+ // request times out, expect request to the next best head
+ ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
+ ts.Run(2, testServer2, ReqUpdates{FirstPeriod: 10, Count: 6})
+
+ // request times out, expect request to the last available server
+ ts.RequestEvent(request.EvTimeout, ts.Request(2, 1), nil)
+ ts.Run(3, testServer1, ReqUpdates{FirstPeriod: 10, Count: 5})
+
+ // valid response to request 3, expect chain synced to period 15
+ ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
+ ts.AddAllowance(testServer1, 1)
+ ts.Run(4)
+ chain.ExpNextSyncPeriod(t, 15)
+
+ // invalid response to request 1; the server can only deliver updates up to period 15 despite its announced head
+ truncated := ts.Request(1, 1)
+ truncated.request = ReqUpdates{FirstPeriod: 10, Count: 5}
+ ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(truncated))
+ ts.ExpFail(testServer3)
+ ts.Run(5)
+ // expect no progress of chain head
+ chain.ExpNextSyncPeriod(t, 15)
+
+ // valid response to request 2, expect chain synced to period 16
+ ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
+ ts.AddAllowance(testServer2, 1)
+ ts.Run(6)
+ chain.ExpNextSyncPeriod(t, 16)
+
+ // a new server is registered with announced head period 17
+ ts.AddServer(testServer4, 1)
+ ts.ServerEvent(EvNewSignedHead, testServer4, types.SignedHeader{SignatureSlot: 0x2000*17 + 0x1000})
+ // expect request to sync one more period
+ ts.Run(7, testServer4, ReqUpdates{FirstPeriod: 16, Count: 1})
+
+ // valid response, expect chain synced to period 17
+ ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
+ ts.AddAllowance(testServer4, 1)
+ ts.Run(8)
+ chain.ExpNextSyncPeriod(t, 17)
+}
+
+func testRespUpdate(request requestWithID) request.Response {
+ var resp RespUpdates
+ if request.request == nil {
+ return resp
+ }
+ req := request.request.(ReqUpdates)
+ resp.Updates = make([]*types.LightClientUpdate, int(req.Count))
+ resp.Committees = make([]*types.SerializedSyncCommittee, int(req.Count))
+ period := req.FirstPeriod
+ for i := range resp.Updates {
+ resp.Updates[i] = &types.LightClientUpdate{AttestedHeader: types.SignedHeader{Header: types.Header{Slot: 0x2000*period + 0x1000}}}
+ resp.Committees[i] = new(types.SerializedSyncCommittee)
+ period++
+ }
+ return resp
+}
diff --git a/beacon/params/params.go b/beacon/params/params.go
index ee9feb1acb..e4e0d00934 100644
--- a/beacon/params/params.go
+++ b/beacon/params/params.go
@@ -41,4 +41,6 @@ const (
StateIndexNextSyncCommittee = 55
StateIndexExecPayload = 56
StateIndexExecHead = 908
+
+ BodyIndexExecPayload = 25
)
diff --git a/beacon/types/light_sync.go b/beacon/types/light_sync.go
index 3284081e4d..ed62d237f1 100644
--- a/beacon/types/light_sync.go
+++ b/beacon/types/light_sync.go
@@ -20,11 +20,20 @@ import (
"errors"
"fmt"
+ "github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/beacon/merkle"
"github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common"
+ "github.com/protolambda/zrnt/eth2/beacon/capella"
+ "github.com/protolambda/ztyp/tree"
)
+// HeadInfo represents an unvalidated new head announcement.
+type HeadInfo struct {
+ Slot uint64
+ BlockRoot common.Hash
+}
+
// BootstrapData contains a sync committee where light sync can be started,
// together with a proof through a beacon header and corresponding state.
// Note: BootstrapData is fetched from a server based on a known checkpoint hash.
@@ -134,3 +143,50 @@ func (u UpdateScore) BetterThan(w UpdateScore) bool {
}
return u.SignerCount > w.SignerCount
}
+
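+// HeaderWithExecProof contains a beacon header along with the header of the
+// corresponding execution payload and a Merkle proof linking the payload to the
+// beacon block body.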
+type HeaderWithExecProof struct {
+ Header
+ PayloadHeader *capella.ExecutionPayloadHeader
+ PayloadBranch merkle.Values
+}
+
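+// Validate verifies the Merkle proof of the execution payload header against the
+// body root of the beacon header.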
+func (h *HeaderWithExecProof) Validate() error {
+ payloadRoot := merkle.Value(h.PayloadHeader.HashTreeRoot(tree.GetHashFn()))
+ return merkle.VerifyProof(h.BodyRoot, params.BodyIndexExecPayload, h.PayloadBranch, payloadRoot)
+}
+
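+// FinalityUpdate proves a finalized beacon header based on a sync committee signed
+// attested header and a Merkle proof of the finalized block root.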
+type FinalityUpdate struct {
+ Attested, Finalized HeaderWithExecProof
+ FinalityBranch merkle.Values
+ // Sync committee BLS signature aggregate
+ Signature SyncAggregate
+ // Slot in which the signature has been created (newer than Header.Slot,
+ // determines the signing sync committee)
+ SignatureSlot uint64
+}
+
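+// SignedHeader returns the signed attested header of the update.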
+func (u *FinalityUpdate) SignedHeader() SignedHeader {
+ return SignedHeader{
+ Header: u.Attested.Header,
+ Signature: u.Signature,
+ SignatureSlot: u.SignatureSlot,
+ }
+}
+
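+// Validate verifies the Merkle proofs of the update. Note that the sync committee
+// signature of the attested header has to be verified separately (see SignedHeader).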
+func (u *FinalityUpdate) Validate() error {
+ if err := u.Attested.Validate(); err != nil {
+ return err
+ }
+ if err := u.Finalized.Validate(); err != nil {
+ return err
+ }
+ return merkle.VerifyProof(u.Attested.StateRoot, params.StateIndexFinalBlock, u.FinalityBranch, merkle.Value(u.Finalized.Hash()))
+}
+
+// ChainHeadEvent carries an authenticated execution payload associated with the
+// latest accepted head of the beacon chain, along with the hash of the latest
+// finalized execution block.
+type ChainHeadEvent struct {
+ HeadBlock *engine.ExecutableData
+ Finalized common.Hash
+}
diff --git a/cmd/blsync/engine_api.go b/cmd/blsync/engine_api.go
new file mode 100644
index 0000000000..d10750e295
--- /dev/null
+++ b/cmd/blsync/engine_api.go
@@ -0,0 +1,69 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "context"
+ "time"
+
+ "github.com/ethereum/go-ethereum/beacon/engine"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/rpc"
+)
+
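+// updateEngineApi reads new chain head events from headCh and propagates them to the
+// target engine API by calling engine_newPayloadV2 and engine_forkchoiceUpdatedV1.
+// If client is nil (dry run) the new heads are only logged.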
+func updateEngineApi(client *rpc.Client, headCh chan types.ChainHeadEvent) {
+ for event := range headCh {
+ if client == nil { // dry run, no engine API specified
+ log.Info("New execution block retrieved", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "finalized block hash", event.Finalized)
+ } else {
+ if status, err := callNewPayloadV2(client, event.HeadBlock); err == nil {
+ log.Info("Successful NewPayload", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "status", status)
+ } else {
+ log.Error("Failed NewPayload", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "error", err)
+ }
+ if status, err := callForkchoiceUpdatedV1(client, event.HeadBlock.BlockHash, event.Finalized); err == nil {
+ log.Info("Successful ForkchoiceUpdated", "head", event.HeadBlock.BlockHash, "status", status)
+ } else {
+ log.Error("Failed ForkchoiceUpdated", "head", event.HeadBlock.BlockHash, "error", err)
+ }
+ }
+ }
+}
+
+func callNewPayloadV2(client *rpc.Client, execData *engine.ExecutableData) (string, error) {
+ var resp engine.PayloadStatusV1
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ err := client.CallContext(ctx, &resp, "engine_newPayloadV2", execData)
+ cancel()
+ return resp.Status, err
+}
+
+func callForkchoiceUpdatedV1(client *rpc.Client, headHash, finalizedHash common.Hash) (string, error) {
+ var resp engine.ForkChoiceResponse
+ update := engine.ForkchoiceStateV1{
+ HeadBlockHash: headHash,
+ SafeBlockHash: finalizedHash,
+ FinalizedBlockHash: finalizedHash,
+ }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ err := client.CallContext(ctx, &resp, "engine_forkchoiceUpdatedV1", update, nil)
+ cancel()
+ return resp.PayloadStatus.Status, err
+}
diff --git a/cmd/blsync/main.go b/cmd/blsync/main.go
new file mode 100644
index 0000000000..fd22761d3c
--- /dev/null
+++ b/cmd/blsync/main.go
@@ -0,0 +1,125 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/ethereum/go-ethereum/beacon/blsync"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/cmd/utils"
+ "github.com/ethereum/go-ethereum/internal/flags"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/node"
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+ "github.com/urfave/cli/v2"
+)
+
+var (
+ verbosityFlag = &cli.IntFlag{
+ Name: "verbosity",
+ Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
+ Value: 3,
+ Category: flags.LoggingCategory,
+ }
+ vmoduleFlag = &cli.StringFlag{
+ Name: "vmodule",
+ Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)",
+ Value: "",
+ Hidden: true,
+ Category: flags.LoggingCategory,
+ }
+)
+
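+// An illustrative invocation (flag values are placeholders; 8551 is the default
+// authenticated engine API port of geth):
+//
+//	blsync --mainnet --beacon.api https://beacon.example.org \
+//	    --beacon.checkpoint 0x<finalized beacon block root> \
+//	    --blsync.engine.api http://127.0.0.1:8551 --blsync.jwtsecret /path/to/jwt.hex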
+func main() {
+ app := flags.NewApp("beacon light syncer tool")
+ app.Flags = []cli.Flag{
+ utils.BeaconApiFlag,
+ utils.BeaconApiHeaderFlag,
+ utils.BeaconThresholdFlag,
+ utils.BeaconNoFilterFlag,
+ utils.BeaconConfigFlag,
+ utils.BeaconGenesisRootFlag,
+ utils.BeaconGenesisTimeFlag,
+ utils.BeaconCheckpointFlag,
+ //TODO datadir for optional permanent database
+ utils.MainnetFlag,
+ utils.SepoliaFlag,
+ utils.GoerliFlag,
+ utils.BlsyncApiFlag,
+ utils.BlsyncJWTSecretFlag,
+ verbosityFlag,
+ vmoduleFlag,
+ }
+ app.Action = sync
+
+ if err := app.Run(os.Args); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+func sync(ctx *cli.Context) error {
+ usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
+ output := io.Writer(os.Stderr)
+ if usecolor {
+ output = colorable.NewColorable(os.Stderr)
+ }
+ verbosity := log.FromLegacyLevel(ctx.Int(verbosityFlag.Name))
+ log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, verbosity, usecolor)))
+
+ headCh := make(chan types.ChainHeadEvent, 16)
+ client := blsync.NewClient(ctx)
+ sub := client.SubscribeChainHeadEvent(headCh)
+ go updateEngineApi(makeRPCClient(ctx), headCh)
+ client.Start()
+ // run until stopped
+ <-ctx.Done()
+ client.Stop()
+ sub.Unsubscribe()
+ close(headCh)
+ return nil
+}
+
+func makeRPCClient(ctx *cli.Context) *rpc.Client {
+ if !ctx.IsSet(utils.BlsyncApiFlag.Name) {
+ log.Warn("No engine API target specified, performing a dry run")
+ return nil
+ }
+ if !ctx.IsSet(utils.BlsyncJWTSecretFlag.Name) {
+ utils.Fatalf("JWT secret parameter missing") //TODO use default if datadir is specified
+ }
+
+ engineApiUrl, jwtFileName := ctx.String(utils.BlsyncApiFlag.Name), ctx.String(utils.BlsyncJWTSecretFlag.Name)
+ var jwtSecret [32]byte
+ if jwt, err := node.ObtainJWTSecret(jwtFileName); err == nil {
+ copy(jwtSecret[:], jwt)
+ } else {
+ utils.Fatalf("Error loading or generating JWT secret: %v", err)
+ }
+ auth := node.NewJWTAuth(jwtSecret)
+ cl, err := rpc.DialOptions(context.Background(), engineApiUrl, rpc.WithHTTPAuth(auth))
+ if err != nil {
+ utils.Fatalf("Could not create RPC client: %v", err)
+ }
+ return cl
+}
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 5f52f1df54..37d17fb1e7 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
+ "github.com/ethereum/go-ethereum/beacon/blsync"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -221,6 +222,8 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
}
catalyst.RegisterSimulatedBeaconAPIs(stack, simBeacon)
stack.RegisterLifecycle(simBeacon)
+ } else if ctx.IsSet(utils.BeaconApiFlag.Name) {
+ stack.RegisterLifecycle(catalyst.NewBlsync(blsync.NewClient(ctx), eth))
} else {
err := catalyst.Register(stack, eth)
if err != nil {
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 9a88e9f2e8..d79d23e226 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -146,6 +146,14 @@ var (
configFileFlag,
utils.LogDebugFlag,
utils.LogBacktraceAtFlag,
+ utils.BeaconApiFlag,
+ utils.BeaconApiHeaderFlag,
+ utils.BeaconThresholdFlag,
+ utils.BeaconNoFilterFlag,
+ utils.BeaconConfigFlag,
+ utils.BeaconGenesisRootFlag,
+ utils.BeaconGenesisTimeFlag,
+ utils.BeaconCheckpointFlag,
}, utils.NetworkFlags, utils.DatabaseFlags)
rpcFlags = []cli.Flag{
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index fad567cd55..e002975d53 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
+ bparams "github.com/ethereum/go-ethereum/beacon/params"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/fdlimit"
"github.com/ethereum/go-ethereum/core"
@@ -281,6 +282,58 @@ var (
Value: ethconfig.Defaults.TransactionHistory,
Category: flags.StateCategory,
}
+ // Beacon client light sync settings
+ BeaconApiFlag = &cli.StringSliceFlag{
+ Name: "beacon.api",
+ Usage: "Beacon node (CL) light client API URL. This flag can be given multiple times.",
+ Category: flags.BeaconCategory,
+ }
+ BeaconApiHeaderFlag = &cli.StringSliceFlag{
+ Name: "beacon.api.header",
+ Usage: "Pass custom HTTP header fields to the emote beacon node API in \"key:value\" format. This flag can be given multiple times.",
+ Category: flags.BeaconCategory,
+ }
+ BeaconThresholdFlag = &cli.IntFlag{
+ Name: "beacon.threshold",
+ Usage: "Beacon sync committee participation threshold",
+ Value: bparams.SyncCommitteeSupermajority,
+ Category: flags.BeaconCategory,
+ }
+ BeaconNoFilterFlag = &cli.BoolFlag{
+ Name: "beacon.nofilter",
+ Usage: "Disable future slot signature filter",
+ Category: flags.BeaconCategory,
+ }
+ BeaconConfigFlag = &cli.StringFlag{
+ Name: "beacon.config",
+ Usage: "Beacon chain config YAML file",
+ Category: flags.BeaconCategory,
+ }
+ BeaconGenesisRootFlag = &cli.StringFlag{
+ Name: "beacon.genesis.gvroot",
+ Usage: "Beacon chain genesis validators root",
+ Category: flags.BeaconCategory,
+ }
+ BeaconGenesisTimeFlag = &cli.Uint64Flag{
+ Name: "beacon.genesis.time",
+ Usage: "Beacon chain genesis time",
+ Category: flags.BeaconCategory,
+ }
+ BeaconCheckpointFlag = &cli.StringFlag{
+ Name: "beacon.checkpoint",
+ Usage: "Beacon chain weak subjectivity checkpoint block hash",
+ Category: flags.BeaconCategory,
+ }
+ BlsyncApiFlag = &cli.StringFlag{
+ Name: "blsync.engine.api",
+ Usage: "Target EL engine API URL",
+ Category: flags.BeaconCategory,
+ }
+ BlsyncJWTSecretFlag = &cli.StringFlag{
+ Name: "blsync.jwtsecret",
+ Usage: "Path to a JWT secret to use for target engine API endpoint",
+ Category: flags.BeaconCategory,
+ }
// Transaction pool settings
TxPoolLocalsFlag = &cli.StringFlag{
Name: "txpool.locals",
diff --git a/eth/catalyst/blsync.go b/eth/catalyst/blsync.go
new file mode 100644
index 0000000000..4877cf4c63
--- /dev/null
+++ b/eth/catalyst/blsync.go
@@ -0,0 +1,88 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package catalyst
+
+import (
+ "github.com/ethereum/go-ethereum/beacon/engine"
+ "github.com/ethereum/go-ethereum/beacon/types"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/log"
+)
+
+// Blsync tracks the head of the beacon chain through the beacon light client
+// and drives the local node via ConsensusAPI.
+type Blsync struct {
+ engine *ConsensusAPI
+ client Client
+ headCh chan types.ChainHeadEvent
+ headSub event.Subscription
+
+ quitCh chan struct{}
+}
+
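+// Client is the beacon light client (implemented by blsync.Client) used as the head
+// source; it emits chain head events carrying authenticated execution payloads.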
+type Client interface {
+ SubscribeChainHeadEvent(ch chan<- types.ChainHeadEvent) event.Subscription
+ Start()
+ Stop()
+}
+
+// NewBlsync creates a new beacon light syncer.
+func NewBlsync(client Client, eth *eth.Ethereum) *Blsync {
+ return &Blsync{
+ engine: newConsensusAPIWithoutHeartbeat(eth),
+ client: client,
+ headCh: make(chan types.ChainHeadEvent, 16),
+ quitCh: make(chan struct{}),
+ }
+}
+
+// Start starts the underlying beacon light client and the sync logic that drives
+// the local node.
+func (b *Blsync) Start() error {
+ log.Info("Beacon light sync started")
+ b.headSub = b.client.SubscribeChainHeadEvent(b.headCh)
+ go b.client.Start()
+
+ for {
+ select {
+ case <-b.quitCh:
+ return nil
+ case head := <-b.headCh:
+ if _, err := b.engine.NewPayloadV2(*head.HeadBlock); err != nil {
+ log.Error("failed to send new payload", "err", err)
+ continue
+ }
+ update := engine.ForkchoiceStateV1{
+ HeadBlockHash: head.HeadBlock.BlockHash,
+ SafeBlockHash: head.Finalized, //TODO pass finalized or empty hash here?
+ FinalizedBlockHash: head.Finalized,
+ }
+ if _, err := b.engine.ForkchoiceUpdatedV1(update, nil); err != nil {
+ log.Error("failed to send forkchoice updated", "err", err)
+ continue
+ }
+ }
+ }
+}
+
+// Stop signals the light client and the syncer to exit.
+func (b *Blsync) Stop() error {
+ b.client.Stop()
+ close(b.quitCh)
+ return nil
+}
diff --git a/go.mod b/go.mod
index 6591bee62f..ca45364b8b 100644
--- a/go.mod
+++ b/go.mod
@@ -19,7 +19,8 @@ require (
github.com/crate-crypto/go-kzg-4844 v0.7.0
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set/v2 v2.1.0
- github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127
+ github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0
+ github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3
github.com/ethereum/c-kzg-4844 v0.4.0
github.com/fatih/color v1.13.0
github.com/ferranbt/fastssz v0.1.2
@@ -54,6 +55,8 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7
+ github.com/protolambda/zrnt v0.30.0
+ github.com/protolambda/ztyp v0.2.2
github.com/rs/cors v1.7.0
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible
github.com/status-im/keycard-go v0.2.0
diff --git a/go.sum b/go.sum
index cc74e15cb4..18236bf8e7 100644
--- a/go.sum
+++ b/go.sum
@@ -149,9 +149,11 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
+github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao=
+github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw=
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
-github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo=
-github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
+github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 h1:+3HCtB74++ClLy8GgjUQYeC8R4ILzVcIe8+5edAJJnE=
+github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d/go.mod h1:DngW8aVqWbuLRMHItjPUyqdj+HWPvnQe8V8y1nDpIbM=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -239,6 +241,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -298,6 +301,7 @@ github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6w
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
+github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -380,6 +384,7 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
@@ -448,8 +453,14 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/protolambda/bls12-381-util v0.0.0-20210720105258-a772f2aac13e/go.mod h1:MPZvj2Pr0N8/dXyTPS5REeg2sdLG7t8DRzC1rLv925w=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7 h1:cZC+usqsYgHtlBaGulVnZ1hfKAi8iWtujBnRLQE698c=
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
+github.com/protolambda/messagediff v1.4.0/go.mod h1:LboJp0EwIbJsePYpzh5Op/9G1/4mIztMRYzzwR0dR2M=
+github.com/protolambda/zrnt v0.30.0 h1:pHEn69ZgaDFGpLGGYG1oD7DvYI7RDirbMBPfbC+8p4g=
+github.com/protolambda/zrnt v0.30.0/go.mod h1:qcdX9CXFeVNCQK/q0nswpzhd+31RHMk2Ax/2lMsJ4Jw=
+github.com/protolambda/ztyp v0.2.2 h1:rVcL3vBu9W/aV646zF6caLS/dyn9BN8NYiuJzicLNyY=
+github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU=
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -842,6 +853,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index 3ff0767921..c044e28f38 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -20,6 +20,7 @@ import "github.com/urfave/cli/v2"
const (
EthCategory = "ETHEREUM"
+ BeaconCategory = "BEACON CHAIN"
LightCategory = "LIGHT CLIENT"
DevCategory = "DEVELOPER CHAIN"
StateCategory = "STATE HISTORY MANAGEMENT"
diff --git a/node/node.go b/node/node.go
index dfa83d58c7..c5cb552d27 100644
--- a/node/node.go
+++ b/node/node.go
@@ -339,15 +339,9 @@ func (n *Node) closeDataDir() {
}
}
-// obtainJWTSecret loads the jwt-secret, either from the provided config,
-// or from the default location. If neither of those are present, it generates
-// a new secret and stores to the default location.
-func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
- fileName := cliParam
- if len(fileName) == 0 {
- // no path provided, use default
- fileName = n.ResolvePath(datadirJWTKey)
- }
+// ObtainJWTSecret loads the jwt-secret from the provided file path. If the file is
+// not present, it generates a new secret and stores it at the given location.
+func ObtainJWTSecret(fileName string) ([]byte, error) {
// try reading from file
if data, err := os.ReadFile(fileName); err == nil {
jwtSecret := common.FromHex(strings.TrimSpace(string(data)))
@@ -373,6 +367,18 @@ func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
return jwtSecret, nil
}
+// obtainJWTSecret loads the jwt-secret, either from the provided config,
+// or from the default location. If neither of those are present, it generates
+// a new secret and stores to the default location.
+func (n *Node) obtainJWTSecret(cliParam string) ([]byte, error) {
+ fileName := cliParam
+ if len(fileName) == 0 {
+ // no path provided, use default
+ fileName = n.ResolvePath(datadirJWTKey)
+ }
+ return ObtainJWTSecret(fileName)
+}
+
// startRPC is a helper method to configure all the various RPC endpoints during node
// startup. It's not meant to be called at any time afterwards as it makes certain
// assumptions about the state of the node.