mirror of https://github.com/ethereum/go-ethereum
cmd/blsync, beacon/light: beacon chain light client (#28822)
Here we add a beacon chain light client for use by geth. Geth can now be configured to run against a beacon chain API endpoint, without pointing a CL to it. To set this up, use the `--beacon.api` flag. Information provided by the beacon chain is verified, i.e. geth does not blindly trust the beacon API endpoint in this mode. The root of trust are the beacon chain 'sync committees'. The configured beacon API endpoint must provide light client data. At this time, only Lodestar and Nimbus provide the necessary APIs. There is also a standalone tool, cmd/blsync, which uses the beacon chain light client to drive any EL implementation via its engine API. --------- Co-authored-by: Felix Lange <fjl@twurst.com>pull/29184/head
parent
d8e0807da2
commit
aadcb88675
@ -0,0 +1,203 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package blsync |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/big" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine" |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/light/sync" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/lru" |
||||
ctypes "github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/holiman/uint256" |
||||
"github.com/protolambda/zrnt/eth2/beacon/capella" |
||||
"github.com/protolambda/zrnt/eth2/configs" |
||||
"github.com/protolambda/ztyp/tree" |
||||
) |
||||
|
||||
// beaconBlockSync implements request.Module; it fetches the beacon blocks belonging
// to the validated and prefetch heads.
type beaconBlockSync struct {
	recentBlocks *lru.Cache[common.Hash, *capella.BeaconBlock] // recently fetched blocks, keyed by block root
	locked       map[common.Hash]request.ServerAndID           // block roots with an in-flight request; the value identifies that request
	serverHeads  map[request.Server]common.Hash                // last head block root announced by each server
	headTracker  headTracker                                   // source of validated and prefetch head information

	lastHeadInfo  types.HeadInfo // last head sent to chainHeadFeed; used to suppress duplicate events
	chainHeadFeed *event.Feed    // receives a types.ChainHeadEvent whenever a new validated head block is available
}
||||
|
||||
// headTracker supplies the head information that beaconBlockSync consumes:
// the prefetch (latest announced) head and the validated head/finality data.
type headTracker interface {
	PrefetchHead() types.HeadInfo
	ValidatedHead() (types.SignedHeader, bool)
	ValidatedFinality() (types.FinalityUpdate, bool)
}
||||
|
||||
// newBeaconBlockSync returns a new beaconBlockSync.
|
||||
func newBeaconBlockSync(headTracker headTracker, chainHeadFeed *event.Feed) *beaconBlockSync { |
||||
return &beaconBlockSync{ |
||||
headTracker: headTracker, |
||||
chainHeadFeed: chainHeadFeed, |
||||
recentBlocks: lru.NewCache[common.Hash, *capella.BeaconBlock](10), |
||||
locked: make(map[common.Hash]request.ServerAndID), |
||||
serverHeads: make(map[request.Server]common.Hash), |
||||
} |
||||
} |
||||
|
||||
// Process implements request.Module.
// It consumes scheduler events (request results, head announcements, server
// unregistrations) and then issues block requests for the current validated
// and prefetch heads if those blocks are not yet available.
func (s *beaconBlockSync) Process(requester request.Requester, events []request.Event) {
	for _, event := range events {
		switch event.Type {
		case request.EvResponse, request.EvFail, request.EvTimeout:
			// A block request finished one way or another; cache the block on
			// success and release the per-root request lock if it is still
			// held by this request.
			sid, req, resp := event.RequestInfo()
			blockRoot := common.Hash(req.(sync.ReqBeaconBlock))
			if resp != nil {
				s.recentBlocks.Add(blockRoot, resp.(*capella.BeaconBlock))
			}
			if s.locked[blockRoot] == sid {
				delete(s.locked, blockRoot)
			}
		case sync.EvNewHead:
			// Remember the latest head each server has announced; used to pick
			// a server that should have the prefetch head block.
			s.serverHeads[event.Server] = event.Data.(types.HeadInfo).BlockRoot
		case request.EvUnregistered:
			delete(s.serverHeads, event.Server)
		}
	}
	s.updateEventFeed()
	// request validated head block if unavailable and not yet requested
	if vh, ok := s.headTracker.ValidatedHead(); ok {
		s.tryRequestBlock(requester, vh.Header.Hash(), false)
	}
	// request prefetch head if the given server has announced it
	if prefetchHead := s.headTracker.PrefetchHead().BlockRoot; prefetchHead != (common.Hash{}) {
		s.tryRequestBlock(requester, prefetchHead, true)
	}
}
||||
|
||||
func (s *beaconBlockSync) tryRequestBlock(requester request.Requester, blockRoot common.Hash, needSameHead bool) { |
||||
if _, ok := s.recentBlocks.Get(blockRoot); ok { |
||||
return |
||||
} |
||||
if _, ok := s.locked[blockRoot]; ok { |
||||
return |
||||
} |
||||
for _, server := range requester.CanSendTo() { |
||||
if needSameHead && (s.serverHeads[server] != blockRoot) { |
||||
continue |
||||
} |
||||
id := requester.Send(server, sync.ReqBeaconBlock(blockRoot)) |
||||
s.locked[blockRoot] = request.ServerAndID{Server: server, ID: id} |
||||
return |
||||
} |
||||
} |
||||
|
||||
func blockHeadInfo(block *capella.BeaconBlock) types.HeadInfo { |
||||
if block == nil { |
||||
return types.HeadInfo{} |
||||
} |
||||
return types.HeadInfo{Slot: uint64(block.Slot), BlockRoot: beaconBlockHash(block)} |
||||
} |
||||
|
||||
// beaconBlockHash calculates the hash of a beacon block.
|
||||
func beaconBlockHash(beaconBlock *capella.BeaconBlock) common.Hash { |
||||
return common.Hash(beaconBlock.HashTreeRoot(configs.Mainnet, tree.GetHashFn())) |
||||
} |
||||
|
||||
// getExecBlock extracts the execution block from the beacon block's payload.
|
||||
func getExecBlock(beaconBlock *capella.BeaconBlock) (*ctypes.Block, error) { |
||||
payload := &beaconBlock.Body.ExecutionPayload |
||||
txs := make([]*ctypes.Transaction, len(payload.Transactions)) |
||||
for i, opaqueTx := range payload.Transactions { |
||||
var tx ctypes.Transaction |
||||
if err := tx.UnmarshalBinary(opaqueTx); err != nil { |
||||
return nil, fmt.Errorf("failed to parse tx %d: %v", i, err) |
||||
} |
||||
txs[i] = &tx |
||||
} |
||||
withdrawals := make([]*ctypes.Withdrawal, len(payload.Withdrawals)) |
||||
for i, w := range payload.Withdrawals { |
||||
withdrawals[i] = &ctypes.Withdrawal{ |
||||
Index: uint64(w.Index), |
||||
Validator: uint64(w.ValidatorIndex), |
||||
Address: common.Address(w.Address), |
||||
Amount: uint64(w.Amount), |
||||
} |
||||
} |
||||
wroot := ctypes.DeriveSha(ctypes.Withdrawals(withdrawals), trie.NewStackTrie(nil)) |
||||
execHeader := &ctypes.Header{ |
||||
ParentHash: common.Hash(payload.ParentHash), |
||||
UncleHash: ctypes.EmptyUncleHash, |
||||
Coinbase: common.Address(payload.FeeRecipient), |
||||
Root: common.Hash(payload.StateRoot), |
||||
TxHash: ctypes.DeriveSha(ctypes.Transactions(txs), trie.NewStackTrie(nil)), |
||||
ReceiptHash: common.Hash(payload.ReceiptsRoot), |
||||
Bloom: ctypes.Bloom(payload.LogsBloom), |
||||
Difficulty: common.Big0, |
||||
Number: new(big.Int).SetUint64(uint64(payload.BlockNumber)), |
||||
GasLimit: uint64(payload.GasLimit), |
||||
GasUsed: uint64(payload.GasUsed), |
||||
Time: uint64(payload.Timestamp), |
||||
Extra: []byte(payload.ExtraData), |
||||
MixDigest: common.Hash(payload.PrevRandao), // reused in merge
|
||||
Nonce: ctypes.BlockNonce{}, // zero
|
||||
BaseFee: (*uint256.Int)(&payload.BaseFeePerGas).ToBig(), |
||||
WithdrawalsHash: &wroot, |
||||
} |
||||
execBlock := ctypes.NewBlockWithHeader(execHeader).WithBody(txs, nil).WithWithdrawals(withdrawals) |
||||
if execBlockHash := execBlock.Hash(); execBlockHash != common.Hash(payload.BlockHash) { |
||||
return execBlock, fmt.Errorf("Sanity check failed, payload hash does not match (expected %x, got %x)", common.Hash(payload.BlockHash), execBlockHash) |
||||
} |
||||
return execBlock, nil |
||||
} |
||||
|
||||
// updateEventFeed sends a chain head event to chainHeadFeed when a new
// validated head block is available together with matching finality
// information. It is a no-op until all of head, finality and the head block
// itself are known, or when the head has not changed since the last event.
func (s *beaconBlockSync) updateEventFeed() {
	head, ok := s.headTracker.ValidatedHead()
	if !ok {
		return
	}
	finality, ok := s.headTracker.ValidatedFinality() //TODO fetch directly if subscription does not deliver
	// Only proceed when the finality update attests the same epoch as the head.
	if !ok || head.Header.Epoch() != finality.Attested.Header.Epoch() {
		return
	}
	validatedHead := head.Header.Hash()
	headBlock, ok := s.recentBlocks.Get(validatedHead)
	if !ok {
		// Head block not fetched yet; Process will request it.
		return
	}
	headInfo := blockHeadInfo(headBlock)
	if headInfo == s.lastHeadInfo {
		// Suppress duplicate events for the same head.
		return
	}
	s.lastHeadInfo = headInfo
	// new head block and finality info available; extract executable data and send event to feed
	execBlock, err := getExecBlock(headBlock)
	if err != nil {
		log.Error("Error extracting execution block from validated beacon block", "error", err)
		return
	}
	s.chainHeadFeed.Send(types.ChainHeadEvent{
		HeadBlock: engine.BlockToExecutableData(execBlock, nil, nil).ExecutionPayload,
		Finalized: common.Hash(finality.Finalized.PayloadHeader.BlockHash),
	})
}
@ -0,0 +1,160 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package blsync |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/light/sync" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/protolambda/zrnt/eth2/beacon/capella" |
||||
"github.com/protolambda/zrnt/eth2/configs" |
||||
"github.com/protolambda/ztyp/tree" |
||||
) |
||||
|
||||
var (
	// Names of the two mock servers used by the block sync test.
	testServer1 = "testServer1"
	testServer2 = "testServer2"

	// Two dummy beacon blocks with distinct slots and execution block numbers;
	// their execution payload block hashes are filled in by init so that
	// getExecBlock's sanity check passes on them.
	testBlock1 = &capella.BeaconBlock{
		Slot: 123,
		Body: capella.BeaconBlockBody{
			ExecutionPayload: capella.ExecutionPayload{BlockNumber: 456},
		},
	}
	testBlock2 = &capella.BeaconBlock{
		Slot: 124,
		Body: capella.BeaconBlockBody{
			ExecutionPayload: capella.ExecutionPayload{BlockNumber: 457},
		},
	}
)
||||
|
||||
// init back-fills the execution payload block hashes of the test blocks from
// the blocks reconstructed by getExecBlock, making the fixtures self-consistent.
func init() {
	// getExecBlock returns the reconstructed block alongside the hash-mismatch
	// error here (BlockHash is still zero at this point), so ignoring the
	// error and using the block's hash is safe.
	eb1, _ := getExecBlock(testBlock1)
	testBlock1.Body.ExecutionPayload.BlockHash = tree.Root(eb1.Hash())
	eb2, _ := getExecBlock(testBlock2)
	testBlock2.Body.ExecutionPayload.BlockHash = tree.Root(eb2.Hash())
}
||||
|
||||
func TestBlockSync(t *testing.T) { |
||||
ht := &testHeadTracker{} |
||||
eventFeed := new(event.Feed) |
||||
blockSync := newBeaconBlockSync(ht, eventFeed) |
||||
headCh := make(chan types.ChainHeadEvent, 16) |
||||
eventFeed.Subscribe(headCh) |
||||
ts := sync.NewTestScheduler(t, blockSync) |
||||
ts.AddServer(testServer1, 1) |
||||
ts.AddServer(testServer2, 1) |
||||
|
||||
expHeadBlock := func(tci int, expHead *capella.BeaconBlock) { |
||||
var expNumber, headNumber uint64 |
||||
if expHead != nil { |
||||
expNumber = uint64(expHead.Body.ExecutionPayload.BlockNumber) |
||||
} |
||||
select { |
||||
case event := <-headCh: |
||||
headNumber = event.HeadBlock.Number |
||||
default: |
||||
} |
||||
if headNumber != expNumber { |
||||
t.Errorf("Wrong head block in test case #%d (expected block number %d, got %d)", tci, expNumber, headNumber) |
||||
} |
||||
} |
||||
|
||||
// no block requests expected until head tracker knows about a head
|
||||
ts.Run(1) |
||||
expHeadBlock(1, nil) |
||||
|
||||
// set block 1 as prefetch head, announced by server 2
|
||||
head1 := blockHeadInfo(testBlock1) |
||||
ht.prefetch = head1 |
||||
ts.ServerEvent(sync.EvNewHead, testServer2, head1) |
||||
// expect request to server 2 which has announced the head
|
||||
ts.Run(2, testServer2, sync.ReqBeaconBlock(head1.BlockRoot)) |
||||
|
||||
// valid response
|
||||
ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testBlock1) |
||||
ts.AddAllowance(testServer2, 1) |
||||
ts.Run(3) |
||||
// head block still not expected as the fetched block is not the validated head yet
|
||||
expHeadBlock(3, nil) |
||||
|
||||
// set as validated head, expect no further requests but block 1 set as head block
|
||||
ht.validated.Header = blockHeader(testBlock1) |
||||
ts.Run(4) |
||||
expHeadBlock(4, testBlock1) |
||||
|
||||
// set block 2 as prefetch head, announced by server 1
|
||||
head2 := blockHeadInfo(testBlock2) |
||||
ht.prefetch = head2 |
||||
ts.ServerEvent(sync.EvNewHead, testServer1, head2) |
||||
// expect request to server 1
|
||||
ts.Run(5, testServer1, sync.ReqBeaconBlock(head2.BlockRoot)) |
||||
|
||||
// req2 fails, no further requests expected because server 2 has not announced it
|
||||
ts.RequestEvent(request.EvFail, ts.Request(5, 1), nil) |
||||
ts.Run(6) |
||||
|
||||
// set as validated head before retrieving block; now it's assumed to be available from server 2 too
|
||||
ht.validated.Header = blockHeader(testBlock2) |
||||
// expect req2 retry to server 2
|
||||
ts.Run(7, testServer2, sync.ReqBeaconBlock(head2.BlockRoot)) |
||||
// now head block should be unavailable again
|
||||
expHeadBlock(4, nil) |
||||
|
||||
// valid response, now head block should be block 2 immediately as it is already validated
|
||||
ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testBlock2) |
||||
ts.Run(8) |
||||
expHeadBlock(5, testBlock2) |
||||
} |
||||
|
||||
func blockHeader(block *capella.BeaconBlock) types.Header { |
||||
return types.Header{ |
||||
Slot: uint64(block.Slot), |
||||
ProposerIndex: uint64(block.ProposerIndex), |
||||
ParentRoot: common.Hash(block.ParentRoot), |
||||
StateRoot: common.Hash(block.StateRoot), |
||||
BodyRoot: common.Hash(block.Body.HashTreeRoot(configs.Mainnet, tree.GetHashFn())), |
||||
} |
||||
} |
||||
|
||||
// testHeadTracker is a headTracker stub whose prefetch and validated heads are
// set directly by the test.
type testHeadTracker struct {
	prefetch  types.HeadInfo     // returned by PrefetchHead
	validated types.SignedHeader // returned by ValidatedHead when non-zero
}
||||
|
||||
// PrefetchHead implements headTracker; it returns the head set by the test.
func (h *testHeadTracker) PrefetchHead() types.HeadInfo {
	return h.prefetch
}
||||
|
||||
// ValidatedHead implements headTracker; the head counts as valid once the
// test has set a non-zero header.
func (h *testHeadTracker) ValidatedHead() (types.SignedHeader, bool) {
	return h.validated, h.validated.Header != (types.Header{})
}
||||
|
||||
// TODO add test case for finality
// ValidatedFinality implements headTracker; it synthesizes a finality update
// whose attested header is the validated head, with an empty finalized
// payload header.
func (h *testHeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
	return types.FinalityUpdate{
		Attested:      types.HeaderWithExecProof{Header: h.validated.Header},
		Finalized:     types.HeaderWithExecProof{PayloadHeader: &capella.ExecutionPayloadHeader{}},
		Signature:     h.validated.Signature,
		SignatureSlot: h.validated.SignatureSlot,
	}, h.validated.Header != (types.Header{})
}
@ -0,0 +1,103 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package blsync |
||||
|
||||
import ( |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light" |
||||
"github.com/ethereum/go-ethereum/beacon/light/api" |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/light/sync" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/ethdb/memorydb" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/urfave/cli/v2" |
||||
) |
||||
|
||||
// Client is a beacon chain light client that drives the request scheduler and
// announces new validated chain heads on its feed.
type Client struct {
	scheduler     *request.Scheduler // runs the sync modules and dispatches requests to servers
	chainHeadFeed *event.Feed        // delivers types.ChainHeadEvent to subscribers
	urls          []string           // beacon API endpoint URLs, one server is registered per URL
	customHeader  map[string]string  // extra HTTP headers sent with every beacon API request
}
||||
|
||||
func NewClient(ctx *cli.Context) *Client { |
||||
if !ctx.IsSet(utils.BeaconApiFlag.Name) { |
||||
utils.Fatalf("Beacon node light client API URL not specified") |
||||
} |
||||
var ( |
||||
chainConfig = makeChainConfig(ctx) |
||||
customHeader = make(map[string]string) |
||||
) |
||||
for _, s := range ctx.StringSlice(utils.BeaconApiHeaderFlag.Name) { |
||||
kv := strings.Split(s, ":") |
||||
if len(kv) != 2 { |
||||
utils.Fatalf("Invalid custom API header entry: %s", s) |
||||
} |
||||
customHeader[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1]) |
||||
} |
||||
// create data structures
|
||||
var ( |
||||
db = memorydb.New() |
||||
threshold = ctx.Int(utils.BeaconThresholdFlag.Name) |
||||
committeeChain = light.NewCommitteeChain(db, chainConfig.ChainConfig, threshold, !ctx.Bool(utils.BeaconNoFilterFlag.Name)) |
||||
headTracker = light.NewHeadTracker(committeeChain, threshold) |
||||
) |
||||
headSync := sync.NewHeadSync(headTracker, committeeChain) |
||||
|
||||
// set up scheduler and sync modules
|
||||
chainHeadFeed := new(event.Feed) |
||||
scheduler := request.NewScheduler() |
||||
checkpointInit := sync.NewCheckpointInit(committeeChain, chainConfig.Checkpoint) |
||||
forwardSync := sync.NewForwardUpdateSync(committeeChain) |
||||
beaconBlockSync := newBeaconBlockSync(headTracker, chainHeadFeed) |
||||
scheduler.RegisterTarget(headTracker) |
||||
scheduler.RegisterTarget(committeeChain) |
||||
scheduler.RegisterModule(checkpointInit, "checkpointInit") |
||||
scheduler.RegisterModule(forwardSync, "forwardSync") |
||||
scheduler.RegisterModule(headSync, "headSync") |
||||
scheduler.RegisterModule(beaconBlockSync, "beaconBlockSync") |
||||
|
||||
return &Client{ |
||||
scheduler: scheduler, |
||||
urls: ctx.StringSlice(utils.BeaconApiFlag.Name), |
||||
customHeader: customHeader, |
||||
chainHeadFeed: chainHeadFeed, |
||||
} |
||||
} |
||||
|
||||
// SubscribeChainHeadEvent allows callers to subscribe a provided channel to new
// head updates.
func (c *Client) SubscribeChainHeadEvent(ch chan<- types.ChainHeadEvent) event.Subscription {
	return c.chainHeadFeed.Subscribe(ch)
}
||||
|
||||
func (c *Client) Start() { |
||||
c.scheduler.Start() |
||||
// register server(s)
|
||||
for _, url := range c.urls { |
||||
beaconApi := api.NewBeaconLightApi(url, c.customHeader) |
||||
c.scheduler.RegisterServer(request.NewServer(api.NewApiServer(beaconApi), &mclock.System{})) |
||||
} |
||||
} |
||||
|
||||
// Stop shuts down the request scheduler and with it all sync activity.
func (c *Client) Stop() {
	c.scheduler.Stop()
}
@ -0,0 +1,113 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package blsync |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/urfave/cli/v2" |
||||
) |
||||
|
||||
// lightClientConfig contains beacon light client configuration: the beacon
// chain parameters plus the checkpoint block root used as the sync starting
// point (root of trust).
type lightClientConfig struct {
	*types.ChainConfig
	Checkpoint common.Hash
}
||||
|
||||
var (
	// MainnetConfig is the beacon light client configuration of the Ethereum mainnet.
	MainnetConfig = lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: common.HexToHash("0x4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95"),
			GenesisTime:           1606824023,
		}).
			AddFork("GENESIS", 0, []byte{0, 0, 0, 0}).
			AddFork("ALTAIR", 74240, []byte{1, 0, 0, 0}).
			AddFork("BELLATRIX", 144896, []byte{2, 0, 0, 0}).
			AddFork("CAPELLA", 194048, []byte{3, 0, 0, 0}),
		Checkpoint: common.HexToHash("0x388be41594ec7d6a6894f18c73f3469f07e2c19a803de4755d335817ed8e2e5a"),
	}

	// SepoliaConfig is the beacon light client configuration of the Sepolia testnet.
	SepoliaConfig = lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: common.HexToHash("0xd8ea171f3c94aea21ebc42a1ed61052acf3f9209c00e4efbaaddac09ed9b8078"),
			GenesisTime:           1655733600,
		}).
			AddFork("GENESIS", 0, []byte{144, 0, 0, 105}).
			AddFork("ALTAIR", 50, []byte{144, 0, 0, 112}).
			AddFork("BELLATRIX", 100, []byte{144, 0, 0, 113}).
			AddFork("CAPELLA", 56832, []byte{144, 0, 0, 114}),
		Checkpoint: common.HexToHash("0x1005a6d9175e96bfbce4d35b80f468e9bff0b674e1e861d16e09e10005a58e81"),
	}

	// GoerliConfig is the beacon light client configuration of the Goerli testnet.
	GoerliConfig = lightClientConfig{
		ChainConfig: (&types.ChainConfig{
			GenesisValidatorsRoot: common.HexToHash("0x043db0d9a83813551ee2f33450d23797757d430911a9320530ad8a0eabc43efb"),
			GenesisTime:           1614588812,
		}).
			AddFork("GENESIS", 0, []byte{0, 0, 16, 32}).
			AddFork("ALTAIR", 36660, []byte{1, 0, 16, 32}).
			AddFork("BELLATRIX", 112260, []byte{2, 0, 16, 32}).
			AddFork("CAPELLA", 162304, []byte{3, 0, 16, 32}),
		Checkpoint: common.HexToHash("0x53a0f4f0a378e2c4ae0a9ee97407eb69d0d737d8d8cd0a5fb1093f42f7b81c49"),
	}
)
||||
|
||||
func makeChainConfig(ctx *cli.Context) lightClientConfig { |
||||
utils.CheckExclusive(ctx, utils.MainnetFlag, utils.GoerliFlag, utils.SepoliaFlag) |
||||
customConfig := ctx.IsSet(utils.BeaconConfigFlag.Name) || ctx.IsSet(utils.BeaconGenesisRootFlag.Name) || ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) |
||||
var config lightClientConfig |
||||
switch { |
||||
case ctx.Bool(utils.MainnetFlag.Name): |
||||
config = MainnetConfig |
||||
case ctx.Bool(utils.SepoliaFlag.Name): |
||||
config = SepoliaConfig |
||||
case ctx.Bool(utils.GoerliFlag.Name): |
||||
config = GoerliConfig |
||||
default: |
||||
if !customConfig { |
||||
config = MainnetConfig |
||||
} |
||||
} |
||||
if customConfig && config.Forks != nil { |
||||
utils.Fatalf("Cannot use custom beacon chain config flags in combination with pre-defined network config") |
||||
} |
||||
if ctx.IsSet(utils.BeaconGenesisRootFlag.Name) { |
||||
if c, err := hexutil.Decode(ctx.String(utils.BeaconGenesisRootFlag.Name)); err == nil && len(c) <= 32 { |
||||
copy(config.GenesisValidatorsRoot[:len(c)], c) |
||||
} else { |
||||
utils.Fatalf("Invalid hex string", "beacon.genesis.gvroot", ctx.String(utils.BeaconGenesisRootFlag.Name), "error", err) |
||||
} |
||||
} |
||||
if ctx.IsSet(utils.BeaconGenesisTimeFlag.Name) { |
||||
config.GenesisTime = ctx.Uint64(utils.BeaconGenesisTimeFlag.Name) |
||||
} |
||||
if ctx.IsSet(utils.BeaconConfigFlag.Name) { |
||||
if err := config.ChainConfig.LoadForks(ctx.String(utils.BeaconConfigFlag.Name)); err != nil { |
||||
utils.Fatalf("Could not load beacon chain config file", "file name", ctx.String(utils.BeaconConfigFlag.Name), "error", err) |
||||
} |
||||
} |
||||
if ctx.IsSet(utils.BeaconCheckpointFlag.Name) { |
||||
if c, err := hexutil.Decode(ctx.String(utils.BeaconCheckpointFlag.Name)); err == nil && len(c) <= 32 { |
||||
copy(config.Checkpoint[:len(c)], c) |
||||
} else { |
||||
utils.Fatalf("Invalid hex string", "beacon.checkpoint", ctx.String(utils.BeaconCheckpointFlag.Name), "error", err) |
||||
} |
||||
} |
||||
return config |
||||
} |
@ -0,0 +1,103 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"reflect" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/light/sync" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
// ApiServer is a wrapper around BeaconLightApi that implements request.requestServer.
type ApiServer struct {
	api           *BeaconLightApi
	eventCallback func(event request.Event) // set by Subscribe; receives head and request events
	unsubscribe   func()                    // stops the head event listener; set by Subscribe, cleared by Unsubscribe
}
||||
|
||||
// NewApiServer creates a new ApiServer.
|
||||
func NewApiServer(api *BeaconLightApi) *ApiServer { |
||||
return &ApiServer{api: api} |
||||
} |
||||
|
||||
// Subscribe implements request.requestServer.
// It starts the head event listener on the underlying API and translates its
// callbacks into scheduler events delivered through eventCallback.
func (s *ApiServer) Subscribe(eventCallback func(event request.Event)) {
	s.eventCallback = eventCallback
	listener := HeadEventListener{
		OnNewHead: func(slot uint64, blockRoot common.Hash) {
			log.Debug("New head received", "slot", slot, "blockRoot", blockRoot)
			eventCallback(request.Event{Type: sync.EvNewHead, Data: types.HeadInfo{Slot: slot, BlockRoot: blockRoot}})
		},
		OnSignedHead: func(head types.SignedHeader) {
			log.Debug("New signed head received", "slot", head.Header.Slot, "blockRoot", head.Header.Hash(), "signerCount", head.Signature.SignerCount())
			eventCallback(request.Event{Type: sync.EvNewSignedHead, Data: head})
		},
		OnFinality: func(head types.FinalityUpdate) {
			log.Debug("New finality update received", "slot", head.Attested.Slot, "blockRoot", head.Attested.Hash(), "signerCount", head.Signature.SignerCount())
			eventCallback(request.Event{Type: sync.EvNewFinalityUpdate, Data: head})
		},
		OnError: func(err error) {
			// Stream errors are logged only; the listener keeps running.
			log.Warn("Head event stream error", "err", err)
		},
	}
	s.unsubscribe = s.api.StartHeadListener(listener)
}
||||
|
||||
// SendRequest implements request.requestServer.
// It dispatches the request to the matching beacon API call on a new goroutine
// and delivers the outcome through eventCallback as EvResponse or EvFail.
func (s *ApiServer) SendRequest(id request.ID, req request.Request) {
	go func() {
		var resp request.Response
		var err error
		switch data := req.(type) {
		case sync.ReqUpdates:
			log.Debug("Beacon API: requesting light client update", "reqid", id, "period", data.FirstPeriod, "count", data.Count)
			var r sync.RespUpdates
			r.Updates, r.Committees, err = s.api.GetBestUpdatesAndCommittees(data.FirstPeriod, data.Count)
			resp = r
		case sync.ReqHeader:
			log.Debug("Beacon API: requesting header", "reqid", id, "hash", common.Hash(data))
			resp, err = s.api.GetHeader(common.Hash(data))
		case sync.ReqCheckpointData:
			log.Debug("Beacon API: requesting checkpoint data", "reqid", id, "hash", common.Hash(data))
			resp, err = s.api.GetCheckpointData(common.Hash(data))
		case sync.ReqBeaconBlock:
			log.Debug("Beacon API: requesting block", "reqid", id, "hash", common.Hash(data))
			resp, err = s.api.GetBeaconBlock(common.Hash(data))
		default:
			// NOTE(review): an unknown request type falls through with nil resp
			// and nil err, producing an EvResponse carrying a nil response —
			// confirm callers never send unhandled types, or consider
			// reporting EvFail here instead.
		}

		if err != nil {
			log.Warn("Beacon API request failed", "type", reflect.TypeOf(req), "reqid", id, "err", err)
			s.eventCallback(request.Event{Type: request.EvFail, Data: request.RequestResponse{ID: id, Request: req}})
		} else {
			s.eventCallback(request.Event{Type: request.EvResponse, Data: request.RequestResponse{ID: id, Request: req, Response: resp}})
		}
	}()
}
||||
|
||||
// Unsubscribe implements request.requestServer.
// It stops the head event listener started by Subscribe, if any.
// Note: Unsubscribe should not be called concurrently with Subscribe.
func (s *ApiServer) Unsubscribe() {
	if s.unsubscribe != nil {
		s.unsubscribe()
		s.unsubscribe = nil
	}
}
@ -0,0 +1,496 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net/http" |
||||
"time" |
||||
|
||||
"github.com/donovanhide/eventsource" |
||||
"github.com/ethereum/go-ethereum/beacon/merkle" |
||||
"github.com/ethereum/go-ethereum/beacon/params" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/hexutil" |
||||
"github.com/protolambda/zrnt/eth2/beacon/capella" |
||||
"github.com/protolambda/zrnt/eth2/configs" |
||||
"github.com/protolambda/ztyp/tree" |
||||
) |
||||
|
||||
var ( |
||||
ErrNotFound = errors.New("404 Not Found") |
||||
ErrInternal = errors.New("500 Internal Server Error") |
||||
) |
||||
|
||||
// CommitteeUpdate is a light client update as served by the beacon API,
// together with its fork version string and the serialized next sync committee.
type CommitteeUpdate struct {
	Version           string
	Update            types.LightClientUpdate
	NextSyncCommittee types.SerializedSyncCommittee
}
||||
|
||||
// committeeUpdateJson is the JSON envelope of a light client update response.
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientupdate
type committeeUpdateJson struct {
	Version string              `json:"version"`
	Data    committeeUpdateData `json:"data"`
}
||||
|
||||
type committeeUpdateData struct { |
||||
Header jsonBeaconHeader `json:"attested_header"` |
||||
NextSyncCommittee types.SerializedSyncCommittee `json:"next_sync_committee"` |
||||
NextSyncCommitteeBranch merkle.Values `json:"next_sync_committee_branch"` |
||||
FinalizedHeader *jsonBeaconHeader `json:"finalized_header,omitempty"` |
||||
FinalityBranch merkle.Values `json:"finality_branch,omitempty"` |
||||
SyncAggregate types.SyncAggregate `json:"sync_aggregate"` |
||||
SignatureSlot common.Decimal `json:"signature_slot"` |
||||
} |
||||
|
||||
type jsonBeaconHeader struct { |
||||
Beacon types.Header `json:"beacon"` |
||||
} |
||||
|
||||
type jsonHeaderWithExecProof struct { |
||||
Beacon types.Header `json:"beacon"` |
||||
Execution *capella.ExecutionPayloadHeader `json:"execution"` |
||||
ExecutionBranch merkle.Values `json:"execution_branch"` |
||||
} |
||||
|
||||
// UnmarshalJSON unmarshals from JSON.
|
||||
func (u *CommitteeUpdate) UnmarshalJSON(input []byte) error { |
||||
var dec committeeUpdateJson |
||||
if err := json.Unmarshal(input, &dec); err != nil { |
||||
return err |
||||
} |
||||
u.Version = dec.Version |
||||
u.NextSyncCommittee = dec.Data.NextSyncCommittee |
||||
u.Update = types.LightClientUpdate{ |
||||
AttestedHeader: types.SignedHeader{ |
||||
Header: dec.Data.Header.Beacon, |
||||
Signature: dec.Data.SyncAggregate, |
||||
SignatureSlot: uint64(dec.Data.SignatureSlot), |
||||
}, |
||||
NextSyncCommitteeRoot: u.NextSyncCommittee.Root(), |
||||
NextSyncCommitteeBranch: dec.Data.NextSyncCommitteeBranch, |
||||
FinalityBranch: dec.Data.FinalityBranch, |
||||
} |
||||
if dec.Data.FinalizedHeader != nil { |
||||
u.Update.FinalizedHeader = &dec.Data.FinalizedHeader.Beacon |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// fetcher is an interface useful for debug-harnessing the http api.
// *http.Client satisfies it; tests can substitute a stub.
type fetcher interface {
	Do(req *http.Request) (*http.Response, error)
}

// BeaconLightApi requests light client information from a beacon node REST API.
// Note: all required API endpoints are currently only implemented by Lodestar.
type BeaconLightApi struct {
	url           string            // base endpoint URL, request paths are appended to it
	client        fetcher           // HTTP client used for all requests
	customHeaders map[string]string // extra headers attached to every request
}
||||
|
||||
func NewBeaconLightApi(url string, customHeaders map[string]string) *BeaconLightApi { |
||||
return &BeaconLightApi{ |
||||
url: url, |
||||
client: &http.Client{ |
||||
Timeout: time.Second * 10, |
||||
}, |
||||
customHeaders: customHeaders, |
||||
} |
||||
} |
||||
|
||||
func (api *BeaconLightApi) httpGet(path string) ([]byte, error) { |
||||
req, err := http.NewRequest("GET", api.url+path, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
for k, v := range api.customHeaders { |
||||
req.Header.Set(k, v) |
||||
} |
||||
resp, err := api.client.Do(req) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer resp.Body.Close() |
||||
switch resp.StatusCode { |
||||
case 200: |
||||
return io.ReadAll(resp.Body) |
||||
case 404: |
||||
return nil, ErrNotFound |
||||
case 500: |
||||
return nil, ErrInternal |
||||
default: |
||||
return nil, fmt.Errorf("unexpected error from API endpoint \"%s\": status code %d", path, resp.StatusCode) |
||||
} |
||||
} |
||||
|
||||
// httpGetf is a convenience wrapper around httpGet that formats the request
// path with fmt.Sprintf before issuing the request.
func (api *BeaconLightApi) httpGetf(format string, params ...any) ([]byte, error) {
	return api.httpGet(fmt.Sprintf(format, params...))
}
||||
|
||||
// GetBestUpdatesAndCommittees fetches and validates LightClientUpdate for given
// period range and full serialized committee for the next period of each update
// (committee root hash equals update.NextSyncCommitteeRoot).
// Note that the results are validated but the update signature should be verified
// by the caller as its validity depends on the update chain.
func (api *BeaconLightApi) GetBestUpdatesAndCommittees(firstPeriod, count uint64) ([]*types.LightClientUpdate, []*types.SerializedSyncCommittee, error) {
	resp, err := api.httpGetf("/eth/v1/beacon/light_client/updates?start_period=%d&count=%d", firstPeriod, count)
	if err != nil {
		return nil, nil, err
	}

	var data []CommitteeUpdate
	if err := json.Unmarshal(resp, &data); err != nil {
		return nil, nil, err
	}
	// The server must return exactly one update per requested period.
	if len(data) != int(count) {
		return nil, nil, errors.New("invalid number of committee updates")
	}
	updates := make([]*types.LightClientUpdate, int(count))
	committees := make([]*types.SerializedSyncCommittee, int(count))
	for i, d := range data {
		// Updates must be returned in consecutive period order.
		if d.Update.AttestedHeader.Header.SyncPeriod() != firstPeriod+uint64(i) {
			return nil, nil, errors.New("wrong committee update header period")
		}
		if err := d.Update.Validate(); err != nil {
			return nil, nil, err
		}
		// The serialized committee must match the root the update commits to.
		if d.NextSyncCommittee.Root() != d.Update.NextSyncCommitteeRoot {
			return nil, nil, errors.New("wrong sync committee root")
		}
		// Copy out of the loop variable so the pointers do not alias d.
		updates[i], committees[i] = new(types.LightClientUpdate), new(types.SerializedSyncCommittee)
		*updates[i], *committees[i] = d.Update, d.NextSyncCommittee
	}
	return updates, committees, nil
}
||||
|
||||
// GetOptimisticHeadUpdate fetches a signed header based on the latest available
// optimistic update. Note that the signature should be verified by the caller
// as its validity depends on the update chain.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
func (api *BeaconLightApi) GetOptimisticHeadUpdate() (types.SignedHeader, error) {
	resp, err := api.httpGet("/eth/v1/beacon/light_client/optimistic_update")
	if err != nil {
		return types.SignedHeader{}, err
	}
	return decodeOptimisticHeadUpdate(resp)
}
||||
|
||||
// decodeOptimisticHeadUpdate decodes a JSON-encoded optimistic update (either
// the REST response or an event payload) into a signed header, checking that
// the sync aggregate fields have the expected fixed sizes.
func decodeOptimisticHeadUpdate(enc []byte) (types.SignedHeader, error) {
	var data struct {
		Data struct {
			Header    jsonBeaconHeader    `json:"attested_header"`
			Aggregate types.SyncAggregate `json:"sync_aggregate"`
			SignatureSlot common.Decimal  `json:"signature_slot"`
		} `json:"data"`
	}
	if err := json.Unmarshal(enc, &data); err != nil {
		return types.SignedHeader{}, err
	}
	// An all-zero state root indicates the fields did not decode, i.e. the
	// payload was not wrapped in a "data" envelope.
	if data.Data.Header.Beacon.StateRoot == (common.Hash{}) {
		// workaround for different event encoding format in Lodestar
		if err := json.Unmarshal(enc, &data.Data); err != nil {
			return types.SignedHeader{}, err
		}
	}

	if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
		return types.SignedHeader{}, errors.New("invalid sync_committee_bits length")
	}
	if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
		return types.SignedHeader{}, errors.New("invalid sync_committee_signature length")
	}
	return types.SignedHeader{
		Header:        data.Data.Header.Beacon,
		Signature:     data.Data.Aggregate,
		SignatureSlot: uint64(data.Data.SignatureSlot),
	}, nil
}
||||
|
||||
// GetFinalityUpdate fetches the latest available finality update.
//
// See data structure definition here:
// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
func (api *BeaconLightApi) GetFinalityUpdate() (types.FinalityUpdate, error) {
	resp, err := api.httpGet("/eth/v1/beacon/light_client/finality_update")
	if err != nil {
		return types.FinalityUpdate{}, err
	}
	return decodeFinalityUpdate(resp)
}
||||
|
||||
// decodeFinalityUpdate decodes a JSON-encoded finality update (REST response
// or event payload) into a types.FinalityUpdate, checking that the sync
// aggregate fields have the expected fixed sizes. The signature is NOT
// verified here; that is the caller's responsibility.
func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
	var data struct {
		Data struct {
			Attested       jsonHeaderWithExecProof `json:"attested_header"`
			Finalized      jsonHeaderWithExecProof `json:"finalized_header"`
			FinalityBranch merkle.Values           `json:"finality_branch"`
			Aggregate      types.SyncAggregate     `json:"sync_aggregate"`
			SignatureSlot  common.Decimal          `json:"signature_slot"`
		} `json:"data"`
	}
	if err := json.Unmarshal(enc, &data); err != nil {
		return types.FinalityUpdate{}, err
	}

	if len(data.Data.Aggregate.Signers) != params.SyncCommitteeBitmaskSize {
		return types.FinalityUpdate{}, errors.New("invalid sync_committee_bits length")
	}
	if len(data.Data.Aggregate.Signature) != params.BLSSignatureSize {
		return types.FinalityUpdate{}, errors.New("invalid sync_committee_signature length")
	}
	return types.FinalityUpdate{
		Attested: types.HeaderWithExecProof{
			Header:        data.Data.Attested.Beacon,
			PayloadHeader: data.Data.Attested.Execution,
			PayloadBranch: data.Data.Attested.ExecutionBranch,
		},
		Finalized: types.HeaderWithExecProof{
			Header:        data.Data.Finalized.Beacon,
			PayloadHeader: data.Data.Finalized.Execution,
			PayloadBranch: data.Data.Finalized.ExecutionBranch,
		},
		FinalityBranch: data.Data.FinalityBranch,
		Signature:      data.Data.Aggregate,
		SignatureSlot:  uint64(data.Data.SignatureSlot),
	}, nil
}
||||
|
||||
// GetHeader fetches and validates the beacon header with the given blockRoot.
// If blockRoot is null hash then the latest head header is fetched.
func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) {
	var blockId string
	if blockRoot == (common.Hash{}) {
		blockId = "head"
	} else {
		blockId = blockRoot.Hex()
	}
	resp, err := api.httpGetf("/eth/v1/beacon/headers/%s", blockId)
	if err != nil {
		return types.Header{}, err
	}

	var data struct {
		Data struct {
			Root      common.Hash `json:"root"`
			Canonical bool        `json:"canonical"`
			Header    struct {
				Message   types.Header  `json:"message"`
				Signature hexutil.Bytes `json:"signature"`
			} `json:"header"`
		} `json:"data"`
	}
	if err := json.Unmarshal(resp, &data); err != nil {
		return types.Header{}, err
	}
	header := data.Data.Header.Message
	// For the "head" query we have no expected root; trust the server-reported
	// one and only check internal consistency below.
	if blockRoot == (common.Hash{}) {
		blockRoot = data.Data.Root
	}
	if header.Hash() != blockRoot {
		return types.Header{}, errors.New("retrieved beacon header root does not match")
	}
	return header, nil
}
||||
|
||||
// GetCheckpointData fetches and validates bootstrap data belonging to the given checkpoint.
func (api *BeaconLightApi) GetCheckpointData(checkpointHash common.Hash) (*types.BootstrapData, error) {
	resp, err := api.httpGetf("/eth/v1/beacon/light_client/bootstrap/0x%x", checkpointHash[:])
	if err != nil {
		return nil, err
	}

	// See data structure definition here:
	// https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
	type bootstrapData struct {
		Data struct {
			Header          jsonBeaconHeader               `json:"header"`
			Committee       *types.SerializedSyncCommittee `json:"current_sync_committee"`
			CommitteeBranch merkle.Values                  `json:"current_sync_committee_branch"`
		} `json:"data"`
	}

	var data bootstrapData
	if err := json.Unmarshal(resp, &data); err != nil {
		return nil, err
	}
	if data.Data.Committee == nil {
		return nil, errors.New("sync committee is missing")
	}
	header := data.Data.Header.Beacon
	if header.Hash() != checkpointHash {
		return nil, fmt.Errorf("invalid checkpoint block header, have %v want %v", header.Hash(), checkpointHash)
	}
	checkpoint := &types.BootstrapData{
		Header:          header,
		CommitteeBranch: data.Data.CommitteeBranch,
		CommitteeRoot:   data.Data.Committee.Root(),
		Committee:       data.Data.Committee,
	}
	if err := checkpoint.Validate(); err != nil {
		return nil, fmt.Errorf("invalid checkpoint: %w", err)
	}
	// NOTE(review): this re-checks the header hash already verified above;
	// presumably defensive — confirm whether it can be removed.
	if checkpoint.Header.Hash() != checkpointHash {
		return nil, errors.New("wrong checkpoint hash")
	}
	return checkpoint, nil
}
||||
|
||||
func (api *BeaconLightApi) GetBeaconBlock(blockRoot common.Hash) (*capella.BeaconBlock, error) { |
||||
resp, err := api.httpGetf("/eth/v2/beacon/blocks/0x%x", blockRoot) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
var beaconBlockMessage struct { |
||||
Data struct { |
||||
Message capella.BeaconBlock `json:"message"` |
||||
} `json:"data"` |
||||
} |
||||
if err := json.Unmarshal(resp, &beaconBlockMessage); err != nil { |
||||
return nil, fmt.Errorf("invalid block json data: %v", err) |
||||
} |
||||
beaconBlock := new(capella.BeaconBlock) |
||||
*beaconBlock = beaconBlockMessage.Data.Message |
||||
root := common.Hash(beaconBlock.HashTreeRoot(configs.Mainnet, tree.GetHashFn())) |
||||
if root != blockRoot { |
||||
return nil, fmt.Errorf("Beacon block root hash mismatch (expected: %x, got: %x)", blockRoot, root) |
||||
} |
||||
return beaconBlock, nil |
||||
} |
||||
|
||||
func decodeHeadEvent(enc []byte) (uint64, common.Hash, error) { |
||||
var data struct { |
||||
Slot common.Decimal `json:"slot"` |
||||
Block common.Hash `json:"block"` |
||||
} |
||||
if err := json.Unmarshal(enc, &data); err != nil { |
||||
return 0, common.Hash{}, err |
||||
} |
||||
return uint64(data.Slot), data.Block, nil |
||||
} |
||||
|
||||
// HeadEventListener bundles the callbacks invoked by StartHeadListener.
// The callbacks are never called concurrently with each other.
type HeadEventListener struct {
	OnNewHead    func(slot uint64, blockRoot common.Hash) // new (unverified) chain head announced
	OnSignedHead func(head types.SignedHeader)            // new optimistic (signed) head update
	OnFinality   func(head types.FinalityUpdate)          // new finality update
	OnError      func(err error)                          // subscription or decoding error
}
||||
|
||||
// StartHeadListener creates an event subscription for heads and signed (optimistic)
|
||||
// head updates and calls the specified callback functions when they are received.
|
||||
// The callbacks are also called for the current head and optimistic head at startup.
|
||||
// They are never called concurrently.
|
||||
func (api *BeaconLightApi) StartHeadListener(listener HeadEventListener) func() { |
||||
closeCh := make(chan struct{}) // initiate closing the stream
|
||||
closedCh := make(chan struct{}) // stream closed (or failed to create)
|
||||
stoppedCh := make(chan struct{}) // sync loop stopped
|
||||
streamCh := make(chan *eventsource.Stream, 1) |
||||
go func() { |
||||
defer close(closedCh) |
||||
// when connected to a Lodestar node the subscription blocks until the
|
||||
// first actual event arrives; therefore we create the subscription in
|
||||
// a separate goroutine while letting the main goroutine sync up to the
|
||||
// current head
|
||||
req, err := http.NewRequest("GET", api.url+ |
||||
"/eth/v1/events?topics=head&topics=light_client_optimistic_update&topics=light_client_finality_update", nil) |
||||
if err != nil { |
||||
listener.OnError(fmt.Errorf("error creating event subscription request: %v", err)) |
||||
return |
||||
} |
||||
for k, v := range api.customHeaders { |
||||
req.Header.Set(k, v) |
||||
} |
||||
stream, err := eventsource.SubscribeWithRequest("", req) |
||||
if err != nil { |
||||
listener.OnError(fmt.Errorf("error creating event subscription: %v", err)) |
||||
close(streamCh) |
||||
return |
||||
} |
||||
streamCh <- stream |
||||
<-closeCh |
||||
stream.Close() |
||||
}() |
||||
|
||||
go func() { |
||||
defer close(stoppedCh) |
||||
|
||||
if head, err := api.GetHeader(common.Hash{}); err == nil { |
||||
listener.OnNewHead(head.Slot, head.Hash()) |
||||
} |
||||
if signedHead, err := api.GetOptimisticHeadUpdate(); err == nil { |
||||
listener.OnSignedHead(signedHead) |
||||
} |
||||
if finalityUpdate, err := api.GetFinalityUpdate(); err == nil { |
||||
listener.OnFinality(finalityUpdate) |
||||
} |
||||
stream := <-streamCh |
||||
if stream == nil { |
||||
return |
||||
} |
||||
|
||||
for { |
||||
select { |
||||
case event, ok := <-stream.Events: |
||||
if !ok { |
||||
break |
||||
} |
||||
switch event.Event() { |
||||
case "head": |
||||
if slot, blockRoot, err := decodeHeadEvent([]byte(event.Data())); err == nil { |
||||
listener.OnNewHead(slot, blockRoot) |
||||
} else { |
||||
listener.OnError(fmt.Errorf("error decoding head event: %v", err)) |
||||
} |
||||
case "light_client_optimistic_update": |
||||
if signedHead, err := decodeOptimisticHeadUpdate([]byte(event.Data())); err == nil { |
||||
listener.OnSignedHead(signedHead) |
||||
} else { |
||||
listener.OnError(fmt.Errorf("error decoding optimistic update event: %v", err)) |
||||
} |
||||
case "light_client_finality_update": |
||||
if finalityUpdate, err := decodeFinalityUpdate([]byte(event.Data())); err == nil { |
||||
listener.OnFinality(finalityUpdate) |
||||
} else { |
||||
listener.OnError(fmt.Errorf("error decoding finality update event: %v", err)) |
||||
} |
||||
default: |
||||
listener.OnError(fmt.Errorf("unexpected event: %s", event.Event())) |
||||
} |
||||
case err, ok := <-stream.Errors: |
||||
if !ok { |
||||
break |
||||
} |
||||
listener.OnError(err) |
||||
} |
||||
} |
||||
}() |
||||
return func() { |
||||
close(closeCh) |
||||
<-closedCh |
||||
<-stoppedCh |
||||
} |
||||
} |
@ -0,0 +1,150 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"errors" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
// HeadTracker keeps track of the latest validated head and the "prefetch" head
// which is the (not necessarily validated) head announced by the majority of
// servers.
type HeadTracker struct {
	lock              sync.RWMutex
	committeeChain    *CommitteeChain // used to verify signed head signatures
	minSignerCount    int             // minimum sync committee signers required to accept a head
	signedHead        types.SignedHeader
	hasSignedHead     bool // true once signedHead holds a validated value
	finalityUpdate    types.FinalityUpdate
	hasFinalityUpdate bool // true once finalityUpdate holds a validated value
	prefetchHead      types.HeadInfo
	changeCounter     uint64 // incremented on every state change; see ChangeCounter
}
||||
|
||||
// NewHeadTracker creates a new HeadTracker.
|
||||
func NewHeadTracker(committeeChain *CommitteeChain, minSignerCount int) *HeadTracker { |
||||
return &HeadTracker{ |
||||
committeeChain: committeeChain, |
||||
minSignerCount: minSignerCount, |
||||
} |
||||
} |
||||
|
||||
// ValidatedHead returns the latest validated head and whether one has been
// set yet.
func (h *HeadTracker) ValidatedHead() (types.SignedHeader, bool) {
	h.lock.RLock()
	defer h.lock.RUnlock()

	return h.signedHead, h.hasSignedHead
}
||||
|
||||
// ValidatedFinality returns the latest validated finality update and whether
// one has been set yet.
func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
	h.lock.RLock()
	defer h.lock.RUnlock()

	return h.finalityUpdate, h.hasFinalityUpdate
}
||||
|
||||
// ValidateHead validates the given signed head. If the head is successfully validated
// and it is better than the old validated head (higher slot or same slot and more
// signers) then ValidatedHead is updated. The boolean return flag signals if
// ValidatedHead has been changed.
func (h *HeadTracker) ValidateHead(head types.SignedHeader) (bool, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	replace, err := h.validate(head, h.signedHead)
	if replace {
		h.signedHead, h.hasSignedHead = head, true
		h.changeCounter++
	}
	return replace, err
}
||||
|
||||
// ValidateFinality validates the given finality update. If it is successfully
// validated and better than the old validated finality update (same criteria
// as ValidateHead) then ValidatedFinality is updated. The boolean return flag
// signals if ValidatedFinality has been changed.
func (h *HeadTracker) ValidateFinality(update types.FinalityUpdate) (bool, error) {
	h.lock.Lock()
	defer h.lock.Unlock()

	replace, err := h.validate(update.SignedHeader(), h.finalityUpdate.SignedHeader())
	if replace {
		h.finalityUpdate, h.hasFinalityUpdate = update, true
		h.changeCounter++
	}
	return replace, err
}
||||
|
||||
// validate checks the given signed head against the current one and reports
// whether it should replace it. A head is accepted if it has enough signers,
// is strictly better than oldHead (higher slot, or same slot with more
// signers) and carries a valid sync committee signature.
// Callers must hold h.lock.
func (h *HeadTracker) validate(head, oldHead types.SignedHeader) (bool, error) {
	signerCount := head.Signature.SignerCount()
	if signerCount < h.minSignerCount {
		return false, errors.New("low signer count")
	}
	// Not better than the current head: not an error, just no replacement.
	if head.Header.Slot < oldHead.Header.Slot || (head.Header.Slot == oldHead.Header.Slot && signerCount <= oldHead.Signature.SignerCount()) {
		return false, nil
	}
	sigOk, age, err := h.committeeChain.VerifySignedHeader(head)
	if err != nil {
		return false, err
	}
	// Suspicious timing is logged but does not reject the head.
	if age < 0 {
		log.Warn("Future signed head received", "age", age)
	}
	if age > time.Minute*2 {
		log.Warn("Old signed head received", "age", age)
	}
	if !sigOk {
		return false, errors.New("invalid header signature")
	}
	return true, nil
}
||||
|
||||
// PrefetchHead returns the latest known prefetch head's head info.
// This head can be used to start fetching related data hoping that it will be
// validated soon.
// Note that the prefetch head cannot be validated cryptographically so it should
// only be used as a performance optimization hint.
func (h *HeadTracker) PrefetchHead() types.HeadInfo {
	h.lock.RLock()
	defer h.lock.RUnlock()

	return h.prefetchHead
}
||||
|
||||
// SetPrefetchHead sets the prefetch head info.
|
||||
// Note that HeadTracker does not verify the prefetch head, just acts as a thread
|
||||
// safe bulletin board.
|
||||
func (h *HeadTracker) SetPrefetchHead(head types.HeadInfo) { |
||||
h.lock.Lock() |
||||
defer h.lock.Unlock() |
||||
|
||||
if head == h.prefetchHead { |
||||
return |
||||
} |
||||
h.prefetchHead = head |
||||
h.changeCounter++ |
||||
} |
||||
|
||||
// ChangeCounter implements the request.targetData interface: it returns a
// counter that is incremented whenever the tracked state changes, allowing
// the scheduler to detect updates.
func (h *HeadTracker) ChangeCounter() uint64 {
	h.lock.RLock()
	defer h.lock.RUnlock()

	return h.changeCounter
}
@ -0,0 +1,401 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package request |
||||
|
||||
import ( |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
// Module represents a mechanism which is typically responsible for downloading
// and updating a passive data structure. It does not directly interact with the
// servers. It can start requests using the Requester interface, maintain its
// internal state by receiving and processing Events and update its target data
// structure based on the obtained data.
// It is the Scheduler's responsibility to feed events to the modules, call
// Process as long as there might be something to process and then generate request
// candidates using MakeRequest and start the best possible requests.
// Modules are called by Scheduler whenever a global trigger is fired. All events
// fire the trigger. Changing a target data structure also triggers a next
// processing round as it could make further actions possible either by the same
// or another Module.
type Module interface {
	// Process is a non-blocking function responsible for starting requests,
	// processing events and updating the target data structures(s) and the
	// internal state of the module. Module state typically consists of information
	// about pending requests and registered servers.
	// Process is always called after an event is received or after a target data
	// structure has been changed.
	//
	// Note: Process functions of different modules are never called concurrently;
	// they are called by Scheduler in the same order of priority as they were
	// registered in.
	Process(Requester, []Event)
}
||||
|
||||
// Requester allows Modules to obtain the list of momentarily available servers,
// start new requests and report server failure when a response has been proven
// to be invalid in the processing phase.
// Note that all Requester functions should be safe to call from Module.Process.
type Requester interface {
	CanSendTo() []Server     // servers currently able to accept a request
	Send(Server, Request) ID // starts a request, returning its scheduler-assigned ID
	Fail(Server, string)     // reports the server as having sent an invalid response
}
||||
|
||||
// Scheduler is a modular network data retrieval framework that coordinates multiple
// servers and retrieval mechanisms (modules). It implements a trigger mechanism
// that calls the Process function of registered modules whenever either the state
// of existing data structures or events coming from registered servers could
// allow new operations.
type Scheduler struct {
	lock    sync.Mutex
	modules []Module // first has highest priority
	names   map[Module]string
	servers map[server]struct{}
	targets map[targetData]uint64 // last seen ChangeCounter per target

	requesterLock sync.RWMutex
	serverOrder   []server
	pending       map[ServerAndID]pendingRequest

	// eventLock guards access to the events list. Note that eventLock can be
	// locked either while lock is locked or unlocked but lock cannot be locked
	// while eventLock is locked.
	eventLock sync.Mutex
	events    []Event
	stopCh    chan chan struct{}

	triggerCh chan struct{} // restarts waiting sync loop
	// if trigger has already been fired then send to testWaitCh blocks until
	// the triggered processing round is finished
	testWaitCh chan struct{}
}
||||
|
||||
type (
	// Server identifies a server without allowing any direct interaction.
	// Note: server interface is used by Scheduler and Tracker but not used by
	// the modules that do not interact with them directly.
	// In order to make module testing easier, Server interface is used in
	// events and modules.
	Server   any
	Request  any    // opaque request payload, interpreted by the module/server pair
	Response any    // opaque response payload
	ID       uint64 // scheduler-assigned identifier of a sent request
	// ServerAndID uniquely identifies a sent request.
	ServerAndID struct {
		Server Server
		ID     ID
	}
)

// targetData represents a registered target data structure that increases its
// ChangeCounter whenever it has been changed.
type targetData interface {
	ChangeCounter() uint64
}

// pendingRequest keeps track of sent and not yet finalized requests and their
// sender modules.
type pendingRequest struct {
	request Request
	module  Module // the module that initiated the request
}
||||
|
||||
// NewScheduler creates a new Scheduler.
func NewScheduler() *Scheduler {
	s := &Scheduler{
		servers: make(map[server]struct{}),
		names:   make(map[Module]string),
		pending: make(map[ServerAndID]pendingRequest),
		targets: make(map[targetData]uint64),
		stopCh:  make(chan chan struct{}),
		// triggerCh has capacity 1 so that firing a trigger never blocks and
		// multiple triggers fired during a round coalesce into one.
		triggerCh: make(chan struct{}, 1),
		// Note: testWaitCh should not have capacity in order to ensure
		// that after a trigger happens testWaitCh will block until the resulting
		// processing round has been finished
		testWaitCh: make(chan struct{}),
	}
	return s
}
||||
|
||||
// RegisterTarget registers a target data structure, ensuring that any changes
// made to it trigger a new round of Module.Process calls, giving a chance to
// modules to react to the changes.
func (s *Scheduler) RegisterTarget(t targetData) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Initial counter value 0; targetChanged picks up the first real value.
	s.targets[t] = 0
}
||||
|
||||
// RegisterModule registers a module. Should be called before starting the scheduler.
// In each processing round the order of module processing depends on the order of
// registration.
func (s *Scheduler) RegisterModule(m Module, name string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.modules = append(s.modules, m)
	s.names[m] = name // name is used for log messages only
}
||||
|
||||
// RegisterServer registers a new server.
// NOTE(review): nothing in this chunk adds the server to s.servers, which
// Stop iterates over — presumably handled in event processing elsewhere in
// this file; verify.
func (s *Scheduler) RegisterServer(server server) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.addEvent(Event{Type: EvRegistered, Server: server})
	// Forward all server events into the scheduler's event queue, tagging
	// them with their originating server.
	server.subscribe(func(event Event) {
		event.Server = server
		s.addEvent(event)
	})
}
||||
|
||||
// UnregisterServer removes a registered server.
func (s *Scheduler) UnregisterServer(server server) {
	s.lock.Lock()
	defer s.lock.Unlock()

	// Stop receiving events from the server, then announce its removal so
	// modules can clean up related state.
	server.unsubscribe()
	s.addEvent(Event{Type: EvUnregistered, Server: server})
}
||||
|
||||
// Start starts the scheduler. It should be called after registering all modules
// and before registering any servers.
func (s *Scheduler) Start() {
	go s.syncLoop()
}
||||
|
||||
// Stop stops the scheduler: it shuts down the sync loop, then unsubscribes
// from all known servers.
func (s *Scheduler) Stop() {
	// Hand the sync loop a channel to close once it has exited, and wait for it.
	stop := make(chan struct{})
	s.stopCh <- stop
	<-stop
	s.lock.Lock()
	for server := range s.servers {
		server.unsubscribe()
	}
	s.servers = nil
	s.lock.Unlock()
}
||||
|
||||
// syncLoop is the main event loop responsible for event/data processing and
// sending new requests.
// A round of processing starts whenever the global trigger is fired. Triggers
// fired during a processing round ensure that there is going to be a next round.
func (s *Scheduler) syncLoop() {
	for {
		s.lock.Lock()
		s.processRound()
		s.lock.Unlock()
	loop:
		for {
			select {
			case stop := <-s.stopCh:
				close(stop)
				return
			case <-s.triggerCh:
				// a trigger was fired; run another processing round
				break loop
			case <-s.testWaitCh:
				// test hook: a receive here proves the previous round has
				// finished; keep waiting for a real trigger
			}
		}
	}
}
||||
|
||||
// targetChanged returns true if a registered target data structure has been
|
||||
// changed since the last call to this function.
|
||||
func (s *Scheduler) targetChanged() (changed bool) { |
||||
for target, counter := range s.targets { |
||||
if newCounter := target.ChangeCounter(); newCounter != counter { |
||||
s.targets[target] = newCounter |
||||
changed = true |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// processRound runs an entire processing round. It calls the Process functions
|
||||
// of all modules, passing all relevant events and repeating Process calls as
|
||||
// long as any changes have been made to the registered target data structures.
|
||||
// Once all events have been processed and a stable state has been achieved,
|
||||
// requests are generated and sent if necessary and possible.
|
||||
func (s *Scheduler) processRound() { |
||||
for { |
||||
log.Trace("Processing modules") |
||||
filteredEvents := s.filterEvents() |
||||
for _, module := range s.modules { |
||||
log.Trace("Processing module", "name", s.names[module], "events", len(filteredEvents[module])) |
||||
module.Process(requester{s, module}, filteredEvents[module]) |
||||
} |
||||
if !s.targetChanged() { |
||||
break |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Trigger starts a new processing round. If fired during processing, it ensures
// another full round of processing all modules.
func (s *Scheduler) Trigger() {
	// non-blocking send: if a trigger is already pending, dropping this one is
	// harmless because a full processing round is guaranteed to follow anyway
	select {
	case s.triggerCh <- struct{}{}:
	default:
	}
}
||||
|
||||
// addEvent adds an event to be processed in the next round. Note that it can be
// called regardless of the state of the lock mutex, making it safe for use in
// the server event callback.
func (s *Scheduler) addEvent(event Event) {
	// events are guarded by their own mutex so that callbacks never need
	// Scheduler.lock
	s.eventLock.Lock()
	s.events = append(s.events, event)
	s.eventLock.Unlock()
	s.Trigger() // make sure a processing round consumes the new event
}
||||
|
||||
// filterEvents sorts each Event either as a request event or a server event,
// depending on its type. Request events are also sorted in a map based on the
// module that originally initiated the request. It also ensures that no events
// related to a server are returned before EvRegistered or after EvUnregistered.
// In case of an EvUnregistered server event it also closes all pending requests
// to the given server by adding a failed request event (EvFail), ensuring that
// all requests get finalized and thereby allowing the module logic to be safe
// and simple.
func (s *Scheduler) filterEvents() map[Module][]Event {
	// grab the events queued so far; addEvent may append more concurrently
	s.eventLock.Lock()
	events := s.events
	s.events = nil
	s.eventLock.Unlock()

	s.requesterLock.Lock()
	defer s.requesterLock.Unlock()

	filteredEvents := make(map[Module][]Event)
	for _, event := range events {
		server := event.Server.(server)
		if _, ok := s.servers[server]; !ok && event.Type != EvRegistered {
			continue // before EvRegister or after EvUnregister, discard
		}
		if event.IsRequestEvent() {
			sid, _, _ := event.RequestInfo()
			pending, ok := s.pending[sid]
			if !ok {
				continue // request already closed, ignore further events
			}
			if event.Type == EvResponse || event.Type == EvFail {
				delete(s.pending, sid) // final event, close pending request
			}
			// request events go only to the module that sent the request
			filteredEvents[pending.module] = append(filteredEvents[pending.module], event)
		} else {
			switch event.Type {
			case EvRegistered:
				s.servers[server] = struct{}{}
				// insert the new server at the front of the round-robin order
				s.serverOrder = append(s.serverOrder, nil)
				copy(s.serverOrder[1:], s.serverOrder[:len(s.serverOrder)-1])
				s.serverOrder[0] = server
			case EvUnregistered:
				s.closePending(event.Server, filteredEvents)
				delete(s.servers, server)
				// remove the server from the ordered list
				for i, srv := range s.serverOrder {
					if srv == server {
						copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
						s.serverOrder = s.serverOrder[:len(s.serverOrder)-1]
						break
					}
				}
			}
			// server events are delivered to every module
			for _, module := range s.modules {
				filteredEvents[module] = append(filteredEvents[module], event)
			}
		}
	}
	return filteredEvents
}
||||
|
||||
// closePending closes all pending requests to the given server and adds an EvFail
|
||||
// event to properly finalize them
|
||||
func (s *Scheduler) closePending(server Server, filteredEvents map[Module][]Event) { |
||||
for sid, pending := range s.pending { |
||||
if sid.Server == server { |
||||
filteredEvents[pending.module] = append(filteredEvents[pending.module], Event{ |
||||
Type: EvFail, |
||||
Server: server, |
||||
Data: RequestResponse{ |
||||
ID: sid.ID, |
||||
Request: pending.request, |
||||
}, |
||||
}) |
||||
delete(s.pending, sid) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// requester implements Requester. Note that while requester basically wraps
// Scheduler (with the added information of the currently processed Module), all
// functions are safe to call from Module.Process which is running while
// the Scheduler.lock mutex is held.
type requester struct {
	*Scheduler        // embedded so scheduler state is directly accessible
	module     Module // the module whose Process call is using this requester
}
||||
|
||||
// CanSendTo returns the list of currently available servers. It also returns
|
||||
// them in an order of least to most recently used, ensuring a round-robin usage
|
||||
// of suitable servers if the module always chooses the first suitable one.
|
||||
func (s requester) CanSendTo() []Server { |
||||
s.requesterLock.RLock() |
||||
defer s.requesterLock.RUnlock() |
||||
|
||||
list := make([]Server, 0, len(s.serverOrder)) |
||||
for _, server := range s.serverOrder { |
||||
if server.canRequestNow() { |
||||
list = append(list, server) |
||||
} |
||||
} |
||||
return list |
||||
} |
||||
|
||||
// Send sends a request and adds an entry to Scheduler.pending map, ensuring that
// related request events will be delivered to the sender Module.
func (s requester) Send(srv Server, req Request) ID {
	s.requesterLock.Lock()
	defer s.requesterLock.Unlock()

	server := srv.(server)
	id := server.sendRequest(req)
	sid := ServerAndID{Server: srv, ID: id}
	s.pending[sid] = pendingRequest{request: req, module: s.module}
	// move the server to the end of serverOrder so that CanSendTo keeps
	// yielding servers in least-recently-used order
	for i, ss := range s.serverOrder {
		if ss == server {
			copy(s.serverOrder[i:len(s.serverOrder)-1], s.serverOrder[i+1:])
			s.serverOrder[len(s.serverOrder)-1] = server
			return id
		}
	}
	// should not happen: every sendable server was added on EvRegistered
	log.Error("Target server not found in ordered list of registered servers")
	return id
}
||||
|
||||
// Fail should be called when a server delivers invalid or useless information.
// Calling Fail disables the given server for a period that is initially short
// but is exponentially growing if it happens frequently. This results in a
// somewhat fault tolerant operation that avoids hammering servers with requests
// that they cannot serve but still gives them a chance periodically.
func (s requester) Fail(srv Server, desc string) {
	// delegate to the wrapped server's internal failure handling
	srv.(server).fail(desc)
}
@ -0,0 +1,122 @@ |
||||
package request |
||||
|
||||
import ( |
||||
"reflect" |
||||
"testing" |
||||
) |
||||
|
||||
// TestEventFilter checks that the scheduler delivers request events only to the
// module that sent the request, delivers server events to all modules, and
// discards events arriving outside the registered lifetime of a server.
func TestEventFilter(t *testing.T) {
	s := NewScheduler()
	module1 := &testModule{name: "module1"}
	module2 := &testModule{name: "module2"}
	s.RegisterModule(module1, "module1")
	s.RegisterModule(module2, "module2")
	s.Start()
	// startup process round without events
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, nil)
	module2.expProcess(t, nil)
	srv := &testServer{}
	// register server; both modules should receive server event
	s.RegisterServer(srv)
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, []Event{
		{Type: EvRegistered, Server: srv},
	})
	module2.expProcess(t, []Event{
		{Type: EvRegistered, Server: srv},
	})
	// let module1 send a request
	srv.canRequest = 1
	module1.sendReq = testRequest
	s.Trigger()
	// in first triggered round module1 sends the request, no events yet
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, nil)
	module2.expProcess(t, nil)
	// server emits EvTimeout; only module1 should receive it
	srv.eventCb(Event{Type: EvTimeout, Data: RequestResponse{ID: 1, Request: testRequest}})
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, []Event{
		{Type: EvTimeout, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
	})
	module2.expProcess(t, nil)
	// unregister server; both modules should receive server event
	s.UnregisterServer(srv)
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, []Event{
		// module1 should also receive EvFail on its pending request
		{Type: EvFail, Server: srv, Data: RequestResponse{ID: 1, Request: testRequest}},
		{Type: EvUnregistered, Server: srv},
	})
	module2.expProcess(t, []Event{
		{Type: EvUnregistered, Server: srv},
	})
	// response after server unregistered; should be discarded
	srv.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
	s.testWaitCh <- struct{}{}
	module1.expProcess(t, nil)
	module2.expProcess(t, nil)
	// no more process rounds expected; shut down
	s.testWaitCh <- struct{}{}
	module1.expNoMoreProcess(t)
	module2.expNoMoreProcess(t)
	s.Stop()
}
||||
|
||||
// testServer is a minimal server stub for scheduler tests. It records the
// subscribed event callback (so tests can inject events directly) and hands
// out sequential request IDs.
type testServer struct {
	eventCb    func(Event) // callback registered by the scheduler via subscribe
	lastID     ID          // last issued request ID
	canRequest int         // number of further requests canRequestNow will allow
}

func (s *testServer) subscribe(eventCb func(Event)) {
	s.eventCb = eventCb
}

func (s *testServer) canRequestNow() bool {
	return s.canRequest > 0
}

func (s *testServer) sendRequest(req Request) ID {
	// consume one unit of the request allowance and issue the next ID
	s.canRequest--
	s.lastID++
	return s.lastID
}

// fail and unsubscribe are no-ops; the tests do not exercise them.
func (s *testServer) fail(string)  {}
func (s *testServer) unsubscribe() {}
||||
|
||||
// testModule is a Module stub that records the event slices passed to each
// Process call and optionally sends a request to the first available server.
type testModule struct {
	name      string
	processed [][]Event // events received by successive Process calls
	sendReq   Request   // if non-nil, sent to the first server CanSendTo yields
}

func (m *testModule) Process(requester Requester, events []Event) {
	m.processed = append(m.processed, events)
	if m.sendReq != nil {
		if cs := requester.CanSendTo(); len(cs) > 0 {
			requester.Send(cs[0], m.sendReq)
		}
	}
}

// expProcess asserts that the next recorded Process call received exactly
// expEvents, consuming that record.
func (m *testModule) expProcess(t *testing.T, expEvents []Event) {
	if len(m.processed) == 0 {
		t.Errorf("Missing call to %s.Process", m.name)
		return
	}
	events := m.processed[0]
	m.processed = m.processed[1:]
	if !reflect.DeepEqual(events, expEvents) {
		t.Errorf("Call to %s.Process with wrong events (expected %v, got %v)", m.name, expEvents, events)
	}
}

// expNoMoreProcess asserts that no further Process calls have been recorded.
func (m *testModule) expNoMoreProcess(t *testing.T) {
	for len(m.processed) > 0 {
		t.Errorf("Unexpected call to %s.Process with events %v", m.name, m.processed[0])
		m.processed = m.processed[1:]
	}
}
@ -0,0 +1,439 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package request |
||||
|
||||
import ( |
||||
"math" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
var (
	// request events
	EvResponse = &EventType{Name: "response", requestEvent: true} // data: RequestResponse; sent by requestServer
	EvFail     = &EventType{Name: "fail", requestEvent: true}     // data: RequestResponse; sent by requestServer
	EvTimeout  = &EventType{Name: "timeout", requestEvent: true}  // data: RequestResponse; sent by serverWithTimeout
	// server events
	EvRegistered      = &EventType{Name: "registered"}      // data: nil; sent by Scheduler
	EvUnregistered    = &EventType{Name: "unregistered"}    // data: nil; sent by Scheduler
	EvCanRequestAgain = &EventType{Name: "canRequestAgain"} // data: nil; sent by serverWithLimits
)

const (
	softRequestTimeout = time.Second      // allow resending request to a different server but do not cancel yet
	hardRequestTimeout = time.Second * 10 // cancel request
)

const (
	// serverWithLimits parameters
	parallelAdjustUp     = 0.1                    // adjust parallelLimit up in case of success under full load
	parallelAdjustDown   = 1                      // adjust parallelLimit down in case of timeout/failure
	minParallelLimit     = 1                      // parallelLimit lower bound
	defaultParallelLimit = 3                      // parallelLimit initial value
	minFailureDelay      = time.Millisecond * 100 // minimum disable time in case of request failure
	maxFailureDelay      = time.Minute            // maximum disable time in case of request failure
	maxServerEventBuffer = 5                      // server event allowance buffer limit
	maxServerEventRate   = time.Second            // server event allowance buffer recharge rate
)
||||
|
||||
// requestServer can send requests in a non-blocking way and feed back events
// through the event callback. After each request it should send back either
// EvResponse or EvFail. Additionally, it may also send application-defined
// events that the Modules can interpret.
type requestServer interface {
	Subscribe(eventCallback func(Event))
	SendRequest(ID, Request)
	Unsubscribe()
}

// server is implemented by a requestServer wrapped into serverWithTimeout and
// serverWithLimits and is used by Scheduler.
// In addition to requestServer functionality, server can also handle timeouts,
// limit the number of parallel in-flight requests and temporarily disable
// new requests based on timeouts and response failures.
type server interface {
	subscribe(eventCallback func(Event)) // registers the scheduler's event sink
	canRequestNow() bool                 // reports whether a request may be started
	sendRequest(Request) ID              // sends a request and returns its ID
	fail(string)                         // reports an invalid/useless response
	unsubscribe()                        // stops event delivery and timers
}
||||
|
||||
// NewServer wraps a requestServer and returns a server
|
||||
func NewServer(rs requestServer, clock mclock.Clock) server { |
||||
s := &serverWithLimits{} |
||||
s.parent = rs |
||||
s.serverWithTimeout.init(clock) |
||||
s.init() |
||||
return s |
||||
} |
||||
|
||||
// EventType identifies an event type, either related to a request or the server
// in general. Server events can also be externally defined.
type EventType struct {
	Name         string
	requestEvent bool // all request events are pre-defined in request package
}

// Event describes an event where the type of Data depends on Type.
// Server field is not required when sent through the event callback; it is filled
// out when processed by the Scheduler. Note that the Scheduler can also create
// and send events (EvRegistered, EvUnregistered) directly.
type Event struct {
	Type   *EventType
	Server Server // filled by Scheduler
	Data   any
}

// IsRequestEvent returns true if the event is a request event.
func (e *Event) IsRequestEvent() bool {
	return e.Type.requestEvent
}

// RequestInfo assumes that the event is a request event and returns its contents
// in a convenient form.
// Note: the type assertion panics if Data is not a RequestResponse; callers
// should check IsRequestEvent first.
func (e *Event) RequestInfo() (ServerAndID, Request, Response) {
	data := e.Data.(RequestResponse)
	return ServerAndID{Server: e.Server, ID: data.ID}, data.Request, data.Response
}

// RequestResponse is the Data type of request events.
type RequestResponse struct {
	ID       ID
	Request  Request
	Response Response
}
||||
|
||||
// serverWithTimeout wraps a requestServer and introduces timeouts.
// The request's lifecycle is concluded if EvResponse or EvFail emitted by the
// parent requestServer. If this does not happen until softRequestTimeout then
// EvTimeout is emitted, after which the final EvResponse or EvFail is still
// guaranteed to follow.
// If the parent fails to send this final event for hardRequestTimeout then
// serverWithTimeout emits EvFail and discards any further events from the
// parent related to the given request.
type serverWithTimeout struct {
	parent       requestServer
	lock         sync.Mutex
	clock        mclock.Clock
	childEventCb func(event Event)   // event sink of the wrapping layer; set by subscribe
	timeouts     map[ID]mclock.Timer // pending timeout timers keyed by request ID
	lastID       ID                  // last issued request ID
}

// init initializes serverWithTimeout.
func (s *serverWithTimeout) init(clock mclock.Clock) {
	s.clock = clock
	s.timeouts = make(map[ID]mclock.Timer)
}
||||
|
||||
// subscribe subscribes to events which include parent (requestServer) events
// plus EvTimeout.
func (s *serverWithTimeout) subscribe(eventCallback func(event Event)) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.childEventCb = eventCallback
	// parent events are routed through our own eventCallback filter
	s.parent.Subscribe(s.eventCallback)
}
||||
|
||||
// sendRequest generates a new request ID, sets up the soft timeout timer, then
// sends the request through the parent (requestServer).
func (s *serverWithTimeout) sendRequest(request Request) (reqId ID) {
	s.lock.Lock()
	s.lastID++
	id := s.lastID
	s.startTimeout(RequestResponse{ID: id, Request: request})
	s.lock.Unlock()
	// send outside the lock to avoid holding it across the parent call
	s.parent.SendRequest(id, request)
	return id
}
||||
|
||||
// eventCallback is called by parent (requestServer) event subscription.
// Final request events (EvResponse/EvFail) are only forwarded while the request
// still has an entry in the timeouts map; events arriving after the hard
// timeout has already finalized the request are dropped. Other events pass
// through unchanged.
func (s *serverWithTimeout) eventCallback(event Event) {
	s.lock.Lock()
	defer s.lock.Unlock()

	switch event.Type {
	case EvResponse, EvFail:
		id := event.Data.(RequestResponse).ID
		if timer, ok := s.timeouts[id]; ok {
			// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
			// call will just do nothing
			timer.Stop()
			delete(s.timeouts, id)
			s.childEventCb(event)
		}
	default:
		s.childEventCb(event)
	}
}
||||
|
||||
// startTimeout starts a timeout timer for the given request.
// When the soft timeout fires, EvTimeout is emitted and a second timer is
// armed for the remaining hard timeout period; if that fires too, the request
// is removed from the timeouts map and finalized with EvFail.
// The caller must hold s.lock.
func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
	id := reqData.ID
	s.timeouts[id] = s.clock.AfterFunc(softRequestTimeout, func() {
		s.lock.Lock()
		if _, ok := s.timeouts[id]; !ok {
			// request already finalized by a response/failure; nothing to do
			s.lock.Unlock()
			return
		}
		s.timeouts[id] = s.clock.AfterFunc(hardRequestTimeout-softRequestTimeout, func() {
			s.lock.Lock()
			if _, ok := s.timeouts[id]; !ok {
				s.lock.Unlock()
				return
			}
			delete(s.timeouts, id)
			childEventCb := s.childEventCb
			s.lock.Unlock()
			// deliver the event outside the lock
			childEventCb(Event{Type: EvFail, Data: reqData})
		})
		childEventCb := s.childEventCb
		s.lock.Unlock()
		childEventCb(Event{Type: EvTimeout, Data: reqData})
	})
}
||||
|
||||
// stop stops all goroutines associated with the server.
|
||||
func (s *serverWithTimeout) unsubscribe() { |
||||
s.lock.Lock() |
||||
defer s.lock.Unlock() |
||||
|
||||
for _, timer := range s.timeouts { |
||||
if timer != nil { |
||||
timer.Stop() |
||||
} |
||||
} |
||||
s.childEventCb = nil |
||||
s.parent.Unsubscribe() |
||||
} |
||||
|
||||
// serverWithLimits wraps serverWithTimeout and implements server. It limits the
// number of parallel in-flight requests and prevents sending new requests when a
// pending one has already timed out. Server events are also rate limited.
// It also implements a failure delay mechanism that adds an exponentially growing
// delay each time a request fails (wrong answer or hard timeout). This makes the
// syncing mechanism less brittle as temporary failures of the server might happen
// sometimes, but still avoids hammering a non-functional server with requests.
type serverWithLimits struct {
	serverWithTimeout
	lock                       sync.Mutex
	childEventCb               func(event Event) // event sink of the wrapping layer; set by subscribe
	softTimeouts               map[ID]struct{}   // in-flight requests that have passed the soft timeout
	pendingCount, timeoutCount int
	parallelLimit              float32 // dynamically adjusted parallel request allowance
	sendEvent                  bool    // if set, EvCanRequestAgain is emitted once requests become possible again
	delayTimer                 mclock.Timer
	delayCounter               int // guards against stale delay timer callbacks
	failureDelayEnd            mclock.AbsTime
	failureDelay               float64
	serverEventBuffer          int            // remaining server event allowance
	eventBufferUpdated         mclock.AbsTime // last time the allowance was recharged
}

// init initializes serverWithLimits.
func (s *serverWithLimits) init() {
	s.softTimeouts = make(map[ID]struct{})
	s.parallelLimit = defaultParallelLimit
	s.serverEventBuffer = maxServerEventBuffer
}
||||
|
||||
// subscribe subscribes to events which include parent (serverWithTimeout) events
// plus EvCanRequestAgain.
func (s *serverWithLimits) subscribe(eventCallback func(event Event)) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.childEventCb = eventCallback
	// parent events are routed through our own eventCallback filter
	s.serverWithTimeout.subscribe(s.eventCallback)
}
||||
|
||||
// eventCallback is called by parent (serverWithTimeout) event subscription.
// It adjusts the parallel limit and the pending/timeout counters based on
// request events, rate limits other (server) events, and emits a deferred
// EvCanRequestAgain when the server becomes available for requests again.
func (s *serverWithLimits) eventCallback(event Event) {
	s.lock.Lock()
	var sendCanRequestAgain bool
	passEvent := true
	switch event.Type {
	case EvTimeout:
		id := event.Data.(RequestResponse).ID
		s.softTimeouts[id] = struct{}{}
		s.timeoutCount++
		// timeout: reduce the parallel allowance, clamped at the lower bound
		s.parallelLimit -= parallelAdjustDown
		if s.parallelLimit < minParallelLimit {
			s.parallelLimit = minParallelLimit
		}
		log.Debug("Server timeout", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
	case EvResponse, EvFail:
		id := event.Data.(RequestResponse).ID
		if _, ok := s.softTimeouts[id]; ok {
			// the request had soft-timed out earlier; it is finalized now
			delete(s.softTimeouts, id)
			s.timeoutCount--
			log.Debug("Server timeout finalized", "count", s.timeoutCount, "parallelLimit", s.parallelLimit)
		}
		if event.Type == EvResponse && s.pendingCount >= int(s.parallelLimit) {
			// successful response under full load; allow a bit more parallelism
			s.parallelLimit += parallelAdjustUp
		}
		s.pendingCount--
		if s.canRequest() {
			sendCanRequestAgain = s.sendEvent
			s.sendEvent = false
		}
		if event.Type == EvFail {
			s.failLocked("failed request")
		}
	default:
		// server event; check rate limit
		if s.serverEventBuffer < maxServerEventBuffer {
			// recharge the allowance based on time elapsed since last update
			now := s.clock.Now()
			sinceUpdate := time.Duration(now - s.eventBufferUpdated)
			if sinceUpdate >= maxServerEventRate*time.Duration(maxServerEventBuffer-s.serverEventBuffer) {
				s.serverEventBuffer = maxServerEventBuffer
				s.eventBufferUpdated = now
			} else {
				addBuffer := int(sinceUpdate / maxServerEventRate)
				s.serverEventBuffer += addBuffer
				s.eventBufferUpdated += mclock.AbsTime(maxServerEventRate * time.Duration(addBuffer))
			}
		}
		if s.serverEventBuffer > 0 {
			s.serverEventBuffer--
		} else {
			passEvent = false // allowance exhausted; drop the event
		}
	}
	childEventCb := s.childEventCb
	s.lock.Unlock()
	// deliver events outside the lock
	if passEvent {
		childEventCb(event)
	}
	if sendCanRequestAgain {
		childEventCb(Event{Type: EvCanRequestAgain})
	}
}
||||
|
||||
// sendRequest sends a request through the parent (serverWithTimeout).
|
||||
func (s *serverWithLimits) sendRequest(request Request) (reqId ID) { |
||||
s.lock.Lock() |
||||
s.pendingCount++ |
||||
s.lock.Unlock() |
||||
return s.serverWithTimeout.sendRequest(request) |
||||
} |
||||
|
||||
// stop stops all goroutines associated with the server.
|
||||
func (s *serverWithLimits) unsubscribe() { |
||||
s.lock.Lock() |
||||
defer s.lock.Unlock() |
||||
|
||||
if s.delayTimer != nil { |
||||
s.delayTimer.Stop() |
||||
s.delayTimer = nil |
||||
} |
||||
s.childEventCb = nil |
||||
s.serverWithTimeout.unsubscribe() |
||||
} |
||||
|
||||
// canRequest checks whether a new request can be started: there must be no
// active failure delay, no soft-timed-out request, and the in-flight count must
// be below parallelLimit. As a side effect it restores parallelLimit to its
// lower bound if it has fallen below minParallelLimit.
// The caller must hold s.lock.
func (s *serverWithLimits) canRequest() bool {
	if s.delayTimer != nil || s.pendingCount >= int(s.parallelLimit) || s.timeoutCount > 0 {
		return false
	}
	if s.parallelLimit < minParallelLimit {
		s.parallelLimit = minParallelLimit
	}
	return true
}
||||
|
||||
// canRequestNow checks whether a new request can be started, according to the
// current in-flight request count and parallelLimit, and also the failure delay
// timer.
// If it returns false then it is guaranteed that an EvCanRequestAgain will be
// sent whenever the server becomes available for requesting again.
func (s *serverWithLimits) canRequestNow() bool {
	var sendCanRequestAgain bool
	s.lock.Lock()
	canRequest := s.canRequest()
	if canRequest {
		// deliver a deferred EvCanRequestAgain at most once
		sendCanRequestAgain = s.sendEvent
		s.sendEvent = false
	}
	childEventCb := s.childEventCb
	s.lock.Unlock()
	// emit outside the lock
	if sendCanRequestAgain {
		childEventCb(Event{Type: EvCanRequestAgain})
	}
	return canRequest
}
||||
|
||||
// delay sets the delay timer to the given duration, disabling new requests for
// the given period. When the timer expires, a deferred EvCanRequestAgain is
// emitted if requests are possible again and one is owed.
// The caller must hold s.lock (the AfterFunc callback takes it itself).
func (s *serverWithLimits) delay(delay time.Duration) {
	if s.delayTimer != nil {
		// Note: if stopping the timer is unsuccessful then the resulting AfterFunc
		// call will just do nothing
		s.delayTimer.Stop()
		s.delayTimer = nil
	}

	// delayCounter lets the callback detect that a newer delay has replaced it
	s.delayCounter++
	delayCounter := s.delayCounter
	log.Debug("Server delay started", "length", delay)
	s.delayTimer = s.clock.AfterFunc(delay, func() {
		log.Debug("Server delay ended", "length", delay)
		var sendCanRequestAgain bool
		s.lock.Lock()
		if s.delayTimer != nil && s.delayCounter == delayCounter { // do nothing if there is a new timer now
			s.delayTimer = nil
			if s.canRequest() {
				sendCanRequestAgain = s.sendEvent
				s.sendEvent = false
			}
		}
		childEventCb := s.childEventCb
		s.lock.Unlock()
		// emit outside the lock
		if sendCanRequestAgain {
			childEventCb(Event{Type: EvCanRequestAgain})
		}
	})
}
||||
|
||||
// fail reports that a response from the server was found invalid by the processing
// Module, disabling new requests for a dynamically adjusted time period.
func (s *serverWithLimits) fail(desc string) {
	s.lock.Lock()
	defer s.lock.Unlock()

	s.failLocked(desc)
}

// failLocked calculates the dynamic failure delay and applies it.
// The delay doubles on each failure, decays exponentially over idle time since
// the last failure period ended, and is clamped to minFailureDelay from below.
// The caller must hold s.lock.
func (s *serverWithLimits) failLocked(desc string) {
	log.Debug("Server error", "description", desc)
	s.failureDelay *= 2
	now := s.clock.Now()
	if now > s.failureDelayEnd {
		// decay: halve the delay for every maxFailureDelay of idle time
		s.failureDelay *= math.Pow(2, -float64(now-s.failureDelayEnd)/float64(maxFailureDelay))
	}
	if s.failureDelay < float64(minFailureDelay) {
		s.failureDelay = float64(minFailureDelay)
	}
	s.failureDelayEnd = now + mclock.AbsTime(s.failureDelay)
	s.delay(time.Duration(s.failureDelay))
}
@ -0,0 +1,158 @@ |
||||
package request |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common/mclock" |
||||
) |
||||
|
||||
// Fixed request/response payloads and a custom event type used across the
// server tests below.
const (
	testRequest  = "Life, the Universe, and Everything"
	testResponse = 42
)

var testEventType = &EventType{Name: "testEvent"}
||||
|
||||
// TestServerEvents checks the timeout behavior of a wrapped server: EvTimeout
// after the soft timeout, EvFail after the hard timeout, and discarding of
// responses that arrive after the hard timeout has finalized the request.
func TestServerEvents(t *testing.T) {
	rs := &testRequestServer{}
	clock := &mclock.Simulated{}
	srv := NewServer(rs, clock)
	var lastEventType *EventType
	srv.subscribe(func(event Event) { lastEventType = event.Type })
	evTypeName := func(evType *EventType) string {
		if evType == nil {
			return "none"
		}
		return evType.Name
	}
	expEvent := func(expType *EventType) {
		if lastEventType != expType {
			t.Errorf("Wrong event type (expected %s, got %s)", evTypeName(expType), evTypeName(lastEventType))
		}
		lastEventType = nil
	}
	// user events should simply be passed through
	rs.eventCb(Event{Type: testEventType})
	expEvent(testEventType)
	// send request, soft timeout, then valid response
	srv.sendRequest(testRequest)
	clock.WaitForTimers(1)
	clock.Run(softRequestTimeout)
	expEvent(EvTimeout)
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
	expEvent(EvResponse)
	// send request, hard timeout (response after hard timeout should be ignored)
	srv.sendRequest(testRequest)
	clock.WaitForTimers(1)
	clock.Run(softRequestTimeout)
	expEvent(EvTimeout)
	clock.WaitForTimers(1)
	clock.Run(hardRequestTimeout)
	expEvent(EvFail)
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
	expEvent(nil)
}
||||
|
||||
// TestServerParallel checks that the number of accepted parallel requests
// follows the dynamically adjusted parallel limit: responses free up and grow
// the allowance while a failure reduces it.
func TestServerParallel(t *testing.T) {
	rs := &testRequestServer{}
	srv := NewServer(rs, &mclock.Simulated{})
	srv.subscribe(func(event Event) {})

	// expSend sends requests while the server accepts them and checks that
	// exactly expSent requests were accepted
	expSend := func(expSent int) {
		var sent int
		for sent <= expSent {
			if !srv.canRequestNow() {
				break
			}
			sent++
			srv.sendRequest(testRequest)
		}
		if sent != expSent {
			t.Errorf("Wrong number of parallel requests accepted (expected %d, got %d)", expSent, sent)
		}
	}
	// max out parallel allowance
	expSend(defaultParallelLimit)
	// 1 answered, should accept 1 more
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
	expSend(1)
	// 2 answered, should accept 2 more
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 2, Request: testRequest, Response: testResponse}})
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 3, Request: testRequest, Response: testResponse}})
	expSend(2)
	// failed request, should decrease allowance and not accept more
	rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 4, Request: testRequest}})
	expSend(0)
	srv.unsubscribe()
}
||||
|
||||
// TestServerFail checks that requests are blocked while a request has soft
// timed out and during the exponentially growing failure delay triggered by
// fail calls and EvFail events.
func TestServerFail(t *testing.T) {
	rs := &testRequestServer{}
	clock := &mclock.Simulated{}
	srv := NewServer(rs, clock)
	srv.subscribe(func(event Event) {})
	expCanRequest := func(expCanRequest bool) {
		if canRequest := srv.canRequestNow(); canRequest != expCanRequest {
			t.Errorf("Wrong result for canRequestNow (expected %v, got %v)", expCanRequest, canRequest)
		}
	}
	// timed out request
	expCanRequest(true)
	srv.sendRequest(testRequest)
	clock.WaitForTimers(1)
	expCanRequest(true)
	clock.Run(softRequestTimeout)
	expCanRequest(false) // cannot request when there is a timed out request
	rs.eventCb(Event{Type: EvResponse, Data: RequestResponse{ID: 1, Request: testRequest, Response: testResponse}})
	expCanRequest(true)
	// explicit server.Fail
	srv.fail("")
	clock.WaitForTimers(1)
	expCanRequest(false) // cannot request for a while after a failure
	clock.Run(minFailureDelay)
	expCanRequest(true)
	// request returned with EvFail
	srv.sendRequest(testRequest)
	rs.eventCb(Event{Type: EvFail, Data: RequestResponse{ID: 2, Request: testRequest}})
	clock.WaitForTimers(1)
	expCanRequest(false) // EvFail should also start failure delay
	clock.Run(minFailureDelay)
	expCanRequest(false) // second failure delay is longer, should still be disabled
	clock.Run(minFailureDelay)
	expCanRequest(true)
	srv.unsubscribe()
}
||||
|
||||
// TestServerEventRateLimit checks that non-request server events are rate
// limited: bursts of up to maxServerEventBuffer events pass through, then the
// allowance refills at a rate of one event per maxServerEventRate.
func TestServerEventRateLimit(t *testing.T) {
	rs := &testRequestServer{}
	clock := &mclock.Simulated{}
	srv := NewServer(rs, clock)
	var eventCount int
	srv.subscribe(func(event Event) {
		// count only non-request events; request events are not rate limited
		if !event.IsRequestEvent() {
			eventCount++
		}
	})
	// expEvents sends the given number of events and checks how many passed
	// the rate limiter.
	expEvents := func(send, expAllowed int) {
		eventCount = 0
		for sent := 0; sent < send; sent++ {
			rs.eventCb(Event{Type: testEventType})
		}
		if eventCount != expAllowed {
			t.Errorf("Wrong number of server events passing rate limitation (sent %d, expected %d, got %d)", send, expAllowed, eventCount)
		}
	}
	expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
	clock.Run(maxServerEventRate)
	expEvents(5, 1)
	clock.Run(maxServerEventRate * maxServerEventBuffer * 2)
	expEvents(maxServerEventBuffer+5, maxServerEventBuffer)
}
||||
|
||||
// testRequestServer is a minimal request server mock; it captures the event
// callback so tests can inject events directly, and discards sent requests.
type testRequestServer struct {
	eventCb func(Event) // callback registered via Subscribe
}

func (rs *testRequestServer) Subscribe(eventCb func(Event)) { rs.eventCb = eventCb }
func (rs *testRequestServer) SendRequest(ID, Request)       {}
func (rs *testRequestServer) Unsubscribe()                  {}
@ -0,0 +1,176 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
) |
||||
|
||||
// headTracker is the interface HeadSync uses to propagate heads: signed heads
// and finality updates are passed on for validation, and the prefetch head is
// updated based on unvalidated server announcements.
type headTracker interface {
	ValidateHead(head types.SignedHeader) (bool, error)
	ValidateFinality(head types.FinalityUpdate) (bool, error)
	SetPrefetchHead(head types.HeadInfo)
}
||||
|
||||
// HeadSync implements request.Module; it updates the validated and prefetch
// heads of HeadTracker based on the EvHead and EvSignedHead events coming from
// registered servers.
// It can also postpone the validation of the latest announced signed head
// until the committee chain is synced up to at least the required period.
type HeadSync struct {
	headTracker headTracker
	chain       committeeChain
	// cached result of chain.NextSyncPeriod, refreshed in Process
	nextSyncPeriod uint64
	chainInit      bool
	// per-server signed heads / finality updates waiting for the chain to
	// sync up to their signature period
	unvalidatedHeads    map[request.Server]types.SignedHeader
	unvalidatedFinality map[request.Server]types.FinalityUpdate
	// last announced (unvalidated) head per server
	serverHeads map[request.Server]types.HeadInfo
	// per-head server count and recency, used for prefetch head selection
	headServerCount map[types.HeadInfo]headServerCount
	headCounter     uint64 // monotonic counter tagging head announcements by recency
	prefetchHead    types.HeadInfo
}
||||
|
||||
// headServerCount is associated with most recently seen head infos; it counts
// the number of servers currently having the given head info as their announced
// head and a counter signaling how recent that head is.
// This data is used for selecting the prefetch head.
type headServerCount struct {
	serverCount int    // number of servers currently announcing this head
	headCounter uint64 // value of HeadSync.headCounter when this head was first seen
}
||||
|
||||
// NewHeadSync creates a new HeadSync.
|
||||
func NewHeadSync(headTracker headTracker, chain committeeChain) *HeadSync { |
||||
s := &HeadSync{ |
||||
headTracker: headTracker, |
||||
chain: chain, |
||||
unvalidatedHeads: make(map[request.Server]types.SignedHeader), |
||||
unvalidatedFinality: make(map[request.Server]types.FinalityUpdate), |
||||
serverHeads: make(map[request.Server]types.HeadInfo), |
||||
headServerCount: make(map[types.HeadInfo]headServerCount), |
||||
} |
||||
return s |
||||
} |
||||
|
||||
// Process implements request.Module.
|
||||
func (s *HeadSync) Process(requester request.Requester, events []request.Event) { |
||||
for _, event := range events { |
||||
switch event.Type { |
||||
case EvNewHead: |
||||
s.setServerHead(event.Server, event.Data.(types.HeadInfo)) |
||||
case EvNewSignedHead: |
||||
s.newSignedHead(event.Server, event.Data.(types.SignedHeader)) |
||||
case EvNewFinalityUpdate: |
||||
s.newFinalityUpdate(event.Server, event.Data.(types.FinalityUpdate)) |
||||
case request.EvUnregistered: |
||||
s.setServerHead(event.Server, types.HeadInfo{}) |
||||
delete(s.serverHeads, event.Server) |
||||
delete(s.unvalidatedHeads, event.Server) |
||||
} |
||||
} |
||||
|
||||
nextPeriod, chainInit := s.chain.NextSyncPeriod() |
||||
if nextPeriod != s.nextSyncPeriod || chainInit != s.chainInit { |
||||
s.nextSyncPeriod, s.chainInit = nextPeriod, chainInit |
||||
s.processUnvalidated() |
||||
} |
||||
} |
||||
|
||||
// newSignedHead handles received signed head; either validates it if the chain
|
||||
// is properly synced or stores it for further validation.
|
||||
func (s *HeadSync) newSignedHead(server request.Server, signedHead types.SignedHeader) { |
||||
if !s.chainInit || types.SyncPeriod(signedHead.SignatureSlot) > s.nextSyncPeriod { |
||||
s.unvalidatedHeads[server] = signedHead |
||||
return |
||||
} |
||||
s.headTracker.ValidateHead(signedHead) |
||||
} |
||||
|
||||
// newFinalityUpdate handles a received finality update; either validates it if
// the chain is properly synced or stores it for further validation.
// (The original comment was copy-pasted from newSignedHead.)
func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
	if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
		// signature period not proven yet; keep for processUnvalidated
		s.unvalidatedFinality[server] = finalityUpdate
		return
	}
	s.headTracker.ValidateFinality(finalityUpdate)
}
||||
|
||||
// processUnvalidated iterates the stored unvalidated heads and finality
// updates and validates those whose signature period is now covered by the
// committee chain. (The original comment named the function
// "processUnvalidatedHeads".)
func (s *HeadSync) processUnvalidated() {
	if !s.chainInit {
		return
	}
	for server, signedHead := range s.unvalidatedHeads {
		if types.SyncPeriod(signedHead.SignatureSlot) <= s.nextSyncPeriod {
			s.headTracker.ValidateHead(signedHead)
			delete(s.unvalidatedHeads, server)
		}
	}
	for server, finalityUpdate := range s.unvalidatedFinality {
		if types.SyncPeriod(finalityUpdate.SignatureSlot) <= s.nextSyncPeriod {
			s.headTracker.ValidateFinality(finalityUpdate)
			delete(s.unvalidatedFinality, server)
		}
	}
}
||||
|
||||
// setServerHead processes non-validated server head announcements and updates
// the prefetch head if necessary. It returns true if the prefetch head was
// changed. An empty head info retracts the server's previous announcement
// (used on server unregistration).
func (s *HeadSync) setServerHead(server request.Server, head types.HeadInfo) bool {
	if oldHead, ok := s.serverHeads[server]; ok {
		if head == oldHead {
			return false // no change
		}
		// decrease the server count of the old head; drop the entry when it
		// reaches zero
		h := s.headServerCount[oldHead]
		if h.serverCount--; h.serverCount > 0 {
			s.headServerCount[oldHead] = h
		} else {
			delete(s.headServerCount, oldHead)
		}
	}
	if head != (types.HeadInfo{}) {
		h, ok := s.headServerCount[head]
		if !ok {
			// first announcement of this head; tag it with a fresh recency counter
			s.headCounter++
			h.headCounter = s.headCounter
		}
		h.serverCount++
		s.headServerCount[head] = h
		s.serverHeads[server] = head
	} else {
		delete(s.serverHeads, server)
	}
	// select the prefetch head: the head announced by the most servers wins,
	// ties broken by recency (higher headCounter)
	var (
		bestHead     types.HeadInfo
		bestHeadInfo headServerCount
	)
	for head, headServerCount := range s.headServerCount {
		if headServerCount.serverCount > bestHeadInfo.serverCount ||
			(headServerCount.serverCount == bestHeadInfo.serverCount && headServerCount.headCounter > bestHeadInfo.headCounter) {
			bestHead, bestHeadInfo = head, headServerCount
		}
	}
	if bestHead == s.prefetchHead {
		return false
	}
	s.prefetchHead = bestHead
	s.headTracker.SetPrefetchHead(bestHead)
	return true
}
@ -0,0 +1,151 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// Shared test fixtures: server identifiers are plain strings; heads and
// signed heads cover multiple slots and sync periods.
var (
	testServer1 = "testServer1"
	testServer2 = "testServer2"
	testServer3 = "testServer3"
	testServer4 = "testServer4"

	testHead0 = types.HeadInfo{}
	testHead1 = types.HeadInfo{Slot: 123, BlockRoot: common.Hash{1}}
	testHead2 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{2}}
	testHead3 = types.HeadInfo{Slot: 124, BlockRoot: common.Hash{3}}
	testHead4 = types.HeadInfo{Slot: 125, BlockRoot: common.Hash{4}}

	testSHead1 = types.SignedHeader{SignatureSlot: 0x0124, Header: types.Header{Slot: 0x0123, StateRoot: common.Hash{1}}}
	testSHead2 = types.SignedHeader{SignatureSlot: 0x2010, Header: types.Header{Slot: 0x200e, StateRoot: common.Hash{2}}}
	// testSHead3 is at the end of period 1 but signed in period 2
	testSHead3 = types.SignedHeader{SignatureSlot: 0x4000, Header: types.Header{Slot: 0x3fff, StateRoot: common.Hash{3}}}
	testSHead4 = types.SignedHeader{SignatureSlot: 0x6444, Header: types.Header{Slot: 0x6443, StateRoot: common.Hash{4}}}
)
||||
|
||||
// TestValidatedHead checks that announced signed heads are either validated
// immediately or queued until the committee chain is synced up to their
// signature period, and that queued heads of unregistered servers are dropped.
func TestValidatedHead(t *testing.T) {
	chain := &TestCommitteeChain{}
	ht := &TestHeadTracker{}
	headSync := NewHeadSync(ht, chain)
	ts := NewTestScheduler(t, headSync)

	ht.ExpValidated(t, 0, nil)

	ts.AddServer(testServer1, 1)
	ts.ServerEvent(EvNewSignedHead, testServer1, testSHead1)
	ts.Run(1)
	// announced head should be queued because of uninitialized chain
	ht.ExpValidated(t, 1, nil)

	chain.SetNextSyncPeriod(0) // initialize chain
	ts.Run(2)
	// expect previously queued head to be validated
	ht.ExpValidated(t, 2, []types.SignedHeader{testSHead1})

	chain.SetNextSyncPeriod(1)
	ts.ServerEvent(EvNewSignedHead, testServer1, testSHead2)
	ts.AddServer(testServer2, 1)
	ts.ServerEvent(EvNewSignedHead, testServer2, testSHead2)
	ts.Run(3)
	// expect both head announcements to be validated instantly
	ht.ExpValidated(t, 3, []types.SignedHeader{testSHead2, testSHead2})

	ts.ServerEvent(EvNewSignedHead, testServer1, testSHead3)
	ts.AddServer(testServer3, 1)
	ts.ServerEvent(EvNewSignedHead, testServer3, testSHead4)
	ts.Run(4)
	// future period announced heads should be queued
	ht.ExpValidated(t, 4, nil)

	chain.SetNextSyncPeriod(2)
	ts.Run(5)
	// testSHead3 can be validated now but not testSHead4
	ht.ExpValidated(t, 5, []types.SignedHeader{testSHead3})

	// server 3 disconnected without proving period 3, its announced head should be dropped
	ts.RemoveServer(testServer3)
	ts.Run(6)
	ht.ExpValidated(t, 6, nil)

	chain.SetNextSyncPeriod(3)
	ts.Run(7)
	// testSHead4 could be validated now but it's not queued by any registered server
	ht.ExpValidated(t, 7, nil)

	ts.ServerEvent(EvNewSignedHead, testServer2, testSHead4)
	ts.Run(8)
	// now testSHead4 should be validated
	ht.ExpValidated(t, 8, []types.SignedHeader{testSHead4})
}
||||
|
||||
// TestPrefetchHead checks the prefetch head selection: the head announced by
// the most servers wins, ties are broken by the most recently first-seen head,
// and removed servers' announcements are retracted.
func TestPrefetchHead(t *testing.T) {
	chain := &TestCommitteeChain{}
	ht := &TestHeadTracker{}
	headSync := NewHeadSync(ht, chain)
	ts := NewTestScheduler(t, headSync)

	ht.ExpPrefetch(t, 0, testHead0) // no servers registered

	ts.AddServer(testServer1, 1)
	ts.ServerEvent(EvNewHead, testServer1, testHead1)
	ts.Run(1)
	ht.ExpPrefetch(t, 1, testHead1) // s1: h1

	ts.AddServer(testServer2, 1)
	ts.ServerEvent(EvNewHead, testServer2, testHead2)
	ts.Run(2)
	ht.ExpPrefetch(t, 2, testHead2) // s1: h1, s2: h2

	ts.ServerEvent(EvNewHead, testServer1, testHead2)
	ts.Run(3)
	ht.ExpPrefetch(t, 3, testHead2) // s1: h2, s2: h2

	ts.AddServer(testServer3, 1)
	ts.ServerEvent(EvNewHead, testServer3, testHead3)
	ts.Run(4)
	ht.ExpPrefetch(t, 4, testHead2) // s1: h2, s2: h2, s3: h3

	ts.AddServer(testServer4, 1)
	ts.ServerEvent(EvNewHead, testServer4, testHead4)
	ts.Run(5)
	ht.ExpPrefetch(t, 5, testHead2) // s1: h2, s2: h2, s3: h3, s4: h4

	ts.ServerEvent(EvNewHead, testServer2, testHead3)
	ts.Run(6)
	ht.ExpPrefetch(t, 6, testHead3) // s1: h2, s2: h3, s3: h3, s4: h4

	ts.RemoveServer(testServer3)
	ts.Run(7)
	ht.ExpPrefetch(t, 7, testHead4) // s1: h2, s2: h3, s4: h4

	ts.RemoveServer(testServer1)
	ts.Run(8)
	ht.ExpPrefetch(t, 8, testHead4) // s2: h3, s4: h4

	ts.RemoveServer(testServer4)
	ts.Run(9)
	ht.ExpPrefetch(t, 9, testHead3) // s2: h3

	ts.RemoveServer(testServer2)
	ts.Run(10)
	ht.ExpPrefetch(t, 10, testHead0) // no servers registered
}
@ -0,0 +1,254 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"reflect" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light" |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
) |
||||
|
||||
// requestWithID pairs a sent request with the server it was sent to and its
// assigned request ID.
type requestWithID struct {
	sid     request.ServerAndID
	request request.Request
}
||||
|
||||
// TestScheduler is a mock request.Requester/scheduler driving a single module
// in tests; it queues injected events, records sent requests per test case and
// tracks expected Server.Fail calls.
type TestScheduler struct {
	t         *testing.T
	module    request.Module
	events    []request.Event          // events queued for the next Run
	servers   []request.Server         // currently registered servers
	allowance map[request.Server]int   // remaining number of requests each server accepts
	sent      map[int][]requestWithID  // requests sent, keyed by test case index
	testIndex int                      // index of the currently running test case
	expFail   map[request.Server]int   // expected Server.Fail calls during next Run
	lastId    request.ID               // last assigned request ID
}
||||
|
||||
func NewTestScheduler(t *testing.T, module request.Module) *TestScheduler { |
||||
return &TestScheduler{ |
||||
t: t, |
||||
module: module, |
||||
allowance: make(map[request.Server]int), |
||||
expFail: make(map[request.Server]int), |
||||
sent: make(map[int][]requestWithID), |
||||
} |
||||
} |
||||
|
||||
// Run feeds the queued events to the module and checks the resulting requests
// and Fail calls against expectations. The variadic exp list contains
// (server, request) pairs; request IDs are expected to be assigned
// sequentially, continuing from the last sent request.
func (ts *TestScheduler) Run(testIndex int, exp ...any) {
	// build the expected request list from (server, request) pairs
	expReqs := make([]requestWithID, len(exp)/2)
	id := ts.lastId
	for i := range expReqs {
		id++
		expReqs[i] = requestWithID{
			sid:     request.ServerAndID{Server: exp[i*2].(request.Server), ID: id},
			request: exp[i*2+1].(request.Request),
		}
	}
	if len(expReqs) == 0 {
		expReqs = nil // DeepEqual distinguishes nil from an empty slice
	}

	ts.testIndex = testIndex
	ts.module.Process(ts, ts.events)
	ts.events = nil

	// any expected Fail calls not consumed by the module are errors
	for server, count := range ts.expFail {
		delete(ts.expFail, server)
		if count == 0 {
			continue
		}
		ts.t.Errorf("Missing %d Server.Fail(s) from server %s in test case #%d", count, server.(string), testIndex)
	}

	if !reflect.DeepEqual(ts.sent[testIndex], expReqs) {
		ts.t.Errorf("Wrong sent requests in test case #%d (expected %v, got %v)", testIndex, expReqs, ts.sent[testIndex])
	}
}
||||
|
||||
func (ts *TestScheduler) CanSendTo() (cs []request.Server) { |
||||
for _, server := range ts.servers { |
||||
if ts.allowance[server] > 0 { |
||||
cs = append(cs, server) |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// Send implements request.Requester; it assigns the next sequential request
// ID, records the sent request under the current test case index and
// decreases the server's allowance.
func (ts *TestScheduler) Send(server request.Server, req request.Request) request.ID {
	ts.lastId++
	ts.sent[ts.testIndex] = append(ts.sent[ts.testIndex], requestWithID{
		sid:     request.ServerAndID{Server: server, ID: ts.lastId},
		request: req,
	})
	ts.allowance[server]--
	return ts.lastId
}
||||
|
||||
func (ts *TestScheduler) Fail(server request.Server, desc string) { |
||||
if ts.expFail[server] == 0 { |
||||
ts.t.Errorf("Unexpected Fail from server %s in test case #%d: %s", server.(string), ts.testIndex, desc) |
||||
return |
||||
} |
||||
ts.expFail[server]-- |
||||
} |
||||
|
||||
// Request returns the reqIndex-th (1-based) request sent in the given test
// case, or the zero value (and a test error) if it was not sent. Callers are
// expected to pass positive indices; reqIndex <= 0 is not guarded.
func (ts *TestScheduler) Request(testIndex, reqIndex int) requestWithID {
	if len(ts.sent[testIndex]) < reqIndex {
		ts.t.Errorf("Missing request from test case %d index %d", testIndex, reqIndex)
		return requestWithID{}
	}
	return ts.sent[testIndex][reqIndex-1]
}
||||
|
||||
func (ts *TestScheduler) ServerEvent(evType *request.EventType, server request.Server, data any) { |
||||
ts.events = append(ts.events, request.Event{ |
||||
Type: evType, |
||||
Server: server, |
||||
Data: data, |
||||
}) |
||||
} |
||||
|
||||
// RequestEvent queues a request event (response, fail or timeout) belonging to
// the given previously sent request. It is a no-op for a zero requestWithID,
// so it can safely be chained after a failed Request lookup.
func (ts *TestScheduler) RequestEvent(evType *request.EventType, req requestWithID, resp request.Response) {
	if req.request == nil {
		return
	}
	ts.events = append(ts.events, request.Event{
		Type:   evType,
		Server: req.sid.Server,
		Data: request.RequestResponse{
			ID:       req.sid.ID,
			Request:  req.request,
			Response: resp,
		},
	})
}
||||
|
||||
// AddServer registers a new server with the given request allowance and queues
// an EvRegistered event for it.
func (ts *TestScheduler) AddServer(server request.Server, allowance int) {
	ts.servers = append(ts.servers, server)
	ts.allowance[server] = allowance
	ts.ServerEvent(request.EvRegistered, server, nil)
}
||||
|
||||
func (ts *TestScheduler) RemoveServer(server request.Server) { |
||||
ts.servers = append(ts.servers, server) |
||||
for i, s := range ts.servers { |
||||
if s == server { |
||||
copy(ts.servers[i:len(ts.servers)-1], ts.servers[i+1:]) |
||||
ts.servers = ts.servers[:len(ts.servers)-1] |
||||
break |
||||
} |
||||
} |
||||
delete(ts.allowance, server) |
||||
ts.ServerEvent(request.EvUnregistered, server, nil) |
||||
} |
||||
|
||||
// AddAllowance increases the number of requests the given server will accept.
func (ts *TestScheduler) AddAllowance(server request.Server, allowance int) {
	ts.allowance[server] += allowance
}
||||
|
||||
// ExpFail registers one expected Server.Fail call for the given server during
// the next Run.
func (ts *TestScheduler) ExpFail(server request.Server) {
	ts.expFail[server]++
}
||||
|
||||
// TestCommitteeChain is a mock committeeChain implementation tracking only the
// first/next sync period numbers and the initialization flag.
type TestCommitteeChain struct {
	fsp, nsp uint64 // first and next sync period
	init     bool   // set by CheckpointInit or SetNextSyncPeriod
}
||||
|
||||
func (t *TestCommitteeChain) CheckpointInit(bootstrap types.BootstrapData) error { |
||||
t.fsp, t.nsp, t.init = bootstrap.Header.SyncPeriod(), bootstrap.Header.SyncPeriod()+2, true |
||||
return nil |
||||
} |
||||
|
||||
func (t *TestCommitteeChain) InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error { |
||||
period := update.AttestedHeader.Header.SyncPeriod() |
||||
if period < t.fsp || period > t.nsp || !t.init { |
||||
return light.ErrInvalidPeriod |
||||
} |
||||
if period == t.nsp { |
||||
t.nsp++ |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (t *TestCommitteeChain) NextSyncPeriod() (uint64, bool) { |
||||
return t.nsp, t.init |
||||
} |
||||
|
||||
// ExpInit checks the mock chain's initialization flag against the expectation.
func (tc *TestCommitteeChain) ExpInit(t *testing.T, ExpInit bool) {
	if tc.init != ExpInit {
		t.Errorf("Incorrect init flag (expected %v, got %v)", ExpInit, tc.init)
	}
}
||||
|
||||
func (t *TestCommitteeChain) SetNextSyncPeriod(nsp uint64) { |
||||
t.init, t.nsp = true, nsp |
||||
} |
||||
|
||||
// ExpNextSyncPeriod checks that the mock chain is initialized and its next
// sync period matches the expectation.
func (tc *TestCommitteeChain) ExpNextSyncPeriod(t *testing.T, expNsp uint64) {
	tc.ExpInit(t, true)
	if tc.nsp != expNsp {
		t.Errorf("Incorrect NextSyncPeriod (expected %d, got %d)", expNsp, tc.nsp)
	}
}
||||
|
||||
// TestHeadTracker is a mock headTracker that records validated heads and the
// last prefetch head set.
type TestHeadTracker struct {
	phead     types.HeadInfo       // last head passed to SetPrefetchHead
	validated []types.SignedHeader // heads passed to ValidateHead since the last ExpValidated
}
||||
|
||||
// ValidateHead records the head as validated and always reports success.
func (ht *TestHeadTracker) ValidateHead(head types.SignedHeader) (bool, error) {
	ht.validated = append(ht.validated, head)
	return true, nil
}
||||
|
||||
// ValidateFinality accepts any finality update without recording it.
// TODO add test case for finality
func (ht *TestHeadTracker) ValidateFinality(head types.FinalityUpdate) (bool, error) {
	return true, nil
}
||||
|
||||
// ExpValidated checks that exactly the given heads have been validated (in
// order) since the last call, then resets the recorded list.
func (ht *TestHeadTracker) ExpValidated(t *testing.T, tci int, expHeads []types.SignedHeader) {
	for i, expHead := range expHeads {
		if i >= len(ht.validated) {
			t.Errorf("Missing validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got none)", tci, i, expHead.Header.Slot, expHead.Header.Hash())
			continue
		}
		if ht.validated[i] != expHead {
			vhead := ht.validated[i].Header
			t.Errorf("Wrong validated head in test case #%d index #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, i, expHead.Header.Slot, expHead.Header.Hash(), vhead.Slot, vhead.Hash())
		}
	}
	// any extra validated heads beyond the expected list are errors
	for i := len(expHeads); i < len(ht.validated); i++ {
		vhead := ht.validated[i].Header
		t.Errorf("Unexpected validated head in test case #%d index #%d (expected none, got {slot %d blockRoot %x})", tci, i, vhead.Slot, vhead.Hash())
	}
	ht.validated = nil
}
||||
|
||||
// SetPrefetchHead records the latest prefetch head for later ExpPrefetch checks.
func (ht *TestHeadTracker) SetPrefetchHead(head types.HeadInfo) {
	ht.phead = head
}
||||
|
||||
// ExpPrefetch checks the last recorded prefetch head against the expectation.
func (ht *TestHeadTracker) ExpPrefetch(t *testing.T, tci int, exp types.HeadInfo) {
	if ht.phead != exp {
		t.Errorf("Wrong prefetch head in test case #%d (expected {slot %d blockRoot %x}, got {slot %d blockRoot %x})", tci, exp.Slot, exp.BlockRoot, ht.phead.Slot, ht.phead.BlockRoot)
	}
}
@ -0,0 +1,42 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
// Server event types processed by the sync modules; the Data field of each
// event carries the type noted in the trailing comment.
var (
	EvNewHead           = &request.EventType{Name: "newHead"}           // data: types.HeadInfo
	EvNewSignedHead     = &request.EventType{Name: "newSignedHead"}     // data: types.SignedHeader
	EvNewFinalityUpdate = &request.EventType{Name: "newFinalityUpdate"} // data: types.FinalityUpdate
)
||||
|
||||
type (
	// ReqUpdates requests a batch of Count light client updates (with their
	// sync committees) starting at FirstPeriod.
	ReqUpdates struct {
		FirstPeriod, Count uint64
	}
	// RespUpdates is the response to ReqUpdates; one committee is expected
	// per update (see verifyRange).
	RespUpdates struct {
		Updates    []*types.LightClientUpdate
		Committees []*types.SerializedSyncCommittee
	}
	// The following request types carry a block root hash identifying the
	// requested beacon header, checkpoint bootstrap data or beacon block.
	ReqHeader         common.Hash
	ReqCheckpointData common.Hash
	ReqBeaconBlock    common.Hash
)
@ -0,0 +1,299 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"sort" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light" |
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
const maxUpdateRequest = 8 // maximum number of updates requested in a single request
|
||||
|
||||
// committeeChain is the interface the sync modules expect from the committee
// chain: checkpoint-based initialization, update insertion and querying the
// next (first not yet proven) sync period.
type committeeChain interface {
	CheckpointInit(bootstrap types.BootstrapData) error
	InsertUpdate(update *types.LightClientUpdate, nextCommittee *types.SerializedSyncCommittee) error
	NextSyncPeriod() (uint64, bool)
}
||||
|
||||
// CheckpointInit implements request.Module; it fetches the light client bootstrap
// data belonging to the given checkpoint hash and initializes the committee chain
// if successful.
type CheckpointInit struct {
	chain          committeeChain
	checkpointHash common.Hash
	locked         request.ServerAndID // request currently in flight; zero value if none
	initialized    bool                // set once the chain has been initialized from a matching response
}
||||
|
||||
// NewCheckpointInit creates a new CheckpointInit.
|
||||
func NewCheckpointInit(chain committeeChain, checkpointHash common.Hash) *CheckpointInit { |
||||
return &CheckpointInit{ |
||||
chain: chain, |
||||
checkpointHash: checkpointHash, |
||||
} |
||||
} |
||||
|
||||
// Process implements request.Module.
|
||||
func (s *CheckpointInit) Process(requester request.Requester, events []request.Event) { |
||||
for _, event := range events { |
||||
if !event.IsRequestEvent() { |
||||
continue |
||||
} |
||||
sid, req, resp := event.RequestInfo() |
||||
if s.locked == sid { |
||||
s.locked = request.ServerAndID{} |
||||
} |
||||
if resp != nil { |
||||
if checkpoint := resp.(*types.BootstrapData); checkpoint.Header.Hash() == common.Hash(req.(ReqCheckpointData)) { |
||||
s.chain.CheckpointInit(*checkpoint) |
||||
s.initialized = true |
||||
return |
||||
} |
||||
|
||||
requester.Fail(event.Server, "invalid checkpoint data") |
||||
} |
||||
} |
||||
// start a request if possible
|
||||
if s.initialized || s.locked != (request.ServerAndID{}) { |
||||
return |
||||
} |
||||
cs := requester.CanSendTo() |
||||
if len(cs) == 0 { |
||||
return |
||||
} |
||||
server := cs[0] |
||||
id := requester.Send(server, ReqCheckpointData(s.checkpointHash)) |
||||
s.locked = request.ServerAndID{Server: server, ID: id} |
||||
} |
||||
|
||||
// ForwardUpdateSync implements request.Module; it fetches updates between the
// committee chain head and each server's announced head. Updates are fetched
// in batches and multiple batches can also be requested in parallel.
// Out of order responses are also handled; if a batch of updates cannot be added
// to the chain immediately because of a gap then the future updates are
// remembered until they can be processed.
type ForwardUpdateSync struct {
	chain          committeeChain
	rangeLock      rangeLock                         // locks period ranges with pending requests or unprocessed responses
	lockedIDs      map[request.ServerAndID]struct{}  // requests whose range is currently locked
	processQueue   []updateResponse                  // verified responses waiting to be inserted in period order
	nextSyncPeriod map[request.Server]uint64         // last known next sync period announced per server
}
||||
|
||||
// NewForwardUpdateSync creates a new ForwardUpdateSync.
|
||||
func NewForwardUpdateSync(chain committeeChain) *ForwardUpdateSync { |
||||
return &ForwardUpdateSync{ |
||||
chain: chain, |
||||
rangeLock: make(rangeLock), |
||||
lockedIDs: make(map[request.ServerAndID]struct{}), |
||||
nextSyncPeriod: make(map[request.Server]uint64), |
||||
} |
||||
} |
||||
|
||||
// rangeLock allows locking sections of an integer space, preventing the syncing
// mechanism from making requests again for sections where a not timed out request
// is already pending or where already fetched and unprocessed data is available.
// Each key maps to the number of active locks on that position.
type rangeLock map[uint64]int

// lock locks or unlocks the given section, depending on the sign of the add parameter.
func (r rangeLock) lock(first, count uint64, add int) {
	end := first + count
	for pos := first; pos < end; pos++ {
		updated := r[pos] + add
		if updated <= 0 {
			delete(r, pos)
			continue
		}
		r[pos] = updated
	}
}

// firstUnlocked returns the first unlocked section starting at or after start
// and not longer than maxCount.
func (r rangeLock) firstUnlocked(start, maxCount uint64) (first, count uint64) {
	// skip over the locked prefix
	first = start
	for _, locked := r[first]; locked; _, locked = r[first] {
		first++
	}
	// extend the section until a locked position or the length limit is hit
	for count = 1; count != maxCount; count++ {
		if _, locked := r[first+count]; locked {
			break
		}
	}
	return first, count
}
||||
|
||||
// lockRange locks the range belonging to the given update request, unless the
|
||||
// same request has already been locked
|
||||
func (s *ForwardUpdateSync) lockRange(sid request.ServerAndID, req ReqUpdates) { |
||||
if _, ok := s.lockedIDs[sid]; ok { |
||||
return |
||||
} |
||||
s.lockedIDs[sid] = struct{}{} |
||||
s.rangeLock.lock(req.FirstPeriod, req.Count, 1) |
||||
} |
||||
|
||||
// unlockRange unlocks the range belonging to the given update request, unless
|
||||
// same request has already been unlocked
|
||||
func (s *ForwardUpdateSync) unlockRange(sid request.ServerAndID, req ReqUpdates) { |
||||
if _, ok := s.lockedIDs[sid]; !ok { |
||||
return |
||||
} |
||||
delete(s.lockedIDs, sid) |
||||
s.rangeLock.lock(req.FirstPeriod, req.Count, -1) |
||||
} |
||||
|
||||
// verifyRange returns true if the number of updates and the individual update
|
||||
// periods in the response match the requested section.
|
||||
func (s *ForwardUpdateSync) verifyRange(request ReqUpdates, response RespUpdates) bool { |
||||
if uint64(len(response.Updates)) != request.Count || uint64(len(response.Committees)) != request.Count { |
||||
return false |
||||
} |
||||
for i, update := range response.Updates { |
||||
if update.AttestedHeader.Header.SyncPeriod() != request.FirstPeriod+uint64(i) { |
||||
return false |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// updateResponse is a response that has passed initial verification and has been
// queued for processing. Note that an update response cannot be processed until
// the previous updates have also been added to the chain.
type updateResponse struct {
	sid      request.ServerAndID // identifies the server and request the response belongs to
	request  ReqUpdates          // the original request (FirstPeriod, Count)
	response RespUpdates         // the verified updates and committees
}
||||
|
||||
// updateResponseList implements sort.Interface and sorts update request/response
// events by FirstPeriod, so that queued responses can be processed in chain order.
type updateResponseList []updateResponse

func (u updateResponseList) Len() int      { return len(u) }
func (u updateResponseList) Swap(i, j int) { u[i], u[j] = u[j], u[i] }

// Less orders responses by the first requested period, ascending.
func (u updateResponseList) Less(i, j int) bool {
	return u[i].request.FirstPeriod < u[j].request.FirstPeriod
}
||||
|
||||
// Process implements request.Module. It runs in three phases:
//  1. consume the incoming events, queueing valid update responses and
//     tracking each server's announced head period;
//  2. process queued responses in ascending period order, advancing the
//     committee chain as far as the available contiguous data allows;
//  3. send new update requests to the best-suited servers for the still
//     unsynced, unlocked period ranges.
func (s *ForwardUpdateSync) Process(requester request.Requester, events []request.Event) {
	for _, event := range events {
		switch event.Type {
		case request.EvResponse, request.EvFail, request.EvTimeout:
			sid, rq, rs := event.RequestInfo()
			req := rq.(ReqUpdates)
			var queued bool
			if event.Type == request.EvResponse {
				resp := rs.(RespUpdates)
				if s.verifyRange(req, resp) {
					// there is a response with a valid format; put it in the process queue
					s.processQueue = append(s.processQueue, updateResponse{sid: sid, request: req, response: resp})
					s.lockRange(sid, req)
					queued = true
				} else {
					requester.Fail(event.Server, "invalid update range")
				}
			}
			if !queued {
				// failed, timed out or invalid response: release the range so it
				// can be requested again (possibly from another server)
				s.unlockRange(sid, req)
			}
		case EvNewSignedHead:
			// remember the highest period this server is expected to serve,
			// derived from its announced signed head (the +256 slot offset
			// presumably accounts for sync committee lookahead — confirm)
			signedHead := event.Data.(types.SignedHeader)
			s.nextSyncPeriod[event.Server] = types.SyncPeriod(signedHead.SignatureSlot + 256)
		case request.EvUnregistered:
			delete(s.nextSyncPeriod, event.Server)
		}
	}

	// try processing ordered list of available responses
	sort.Sort(updateResponseList(s.processQueue))
	for s.processQueue != nil {
		u := s.processQueue[0]
		if !s.processResponse(requester, u) {
			// gap before this response; wait for the missing range
			break
		}
		s.unlockRange(u.sid, u.request)
		s.processQueue = s.processQueue[1:]
		if len(s.processQueue) == 0 {
			s.processQueue = nil
		}
	}

	// start new requests if possible
	startPeriod, chainInit := s.chain.NextSyncPeriod()
	if !chainInit {
		return
	}
	for {
		// find the next unlocked range and pick the server that can cover
		// the largest part of it, according to its announced head period
		firstPeriod, maxCount := s.rangeLock.firstUnlocked(startPeriod, maxUpdateRequest)
		var (
			sendTo    request.Server
			bestCount uint64
		)
		for _, server := range requester.CanSendTo() {
			nextPeriod := s.nextSyncPeriod[server]
			if nextPeriod <= firstPeriod {
				continue // server not expected to have updates in this range
			}
			count := maxCount
			if nextPeriod < firstPeriod+maxCount {
				count = nextPeriod - firstPeriod
			}
			if count > bestCount {
				sendTo, bestCount = server, count
			}
		}
		if sendTo == nil {
			// no suitable server for the next range; stop requesting
			return
		}
		req := ReqUpdates{FirstPeriod: firstPeriod, Count: bestCount}
		id := requester.Send(sendTo, req)
		s.lockRange(request.ServerAndID{Server: sendTo, ID: id}, req)
	}
}
||||
|
||||
// processResponse adds the fetched updates and committees to the committee chain.
// Returns true in case of full or partial success (at least one update inserted).
//
// ErrInvalidPeriod signals a gap before this response and is not treated as a
// server failure; the other known errors indicate bad data and cause the
// sending server to be failed.
func (s *ForwardUpdateSync) processResponse(requester request.Requester, u updateResponse) (success bool) {
	for i, update := range u.response.Updates {
		if err := s.chain.InsertUpdate(update, u.response.Committees[i]); err != nil {
			if err == light.ErrInvalidPeriod {
				// there is a gap in the update periods; stop processing without
				// failing and try again next time
				return
			}
			if err == light.ErrInvalidUpdate || err == light.ErrWrongCommitteeRoot || err == light.ErrCannotReorg {
				requester.Fail(u.sid.Server, "invalid update received")
			} else {
				log.Error("Unexpected InsertUpdate error", "error", err)
			}
			return
		}
		success = true
	}
	return
}
@ -0,0 +1,219 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package sync |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/light/request" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
) |
||||
|
||||
// TestCheckpointInit checks that the checkpoint init module retries the
// bootstrap request on timeout/failure, rejects a response whose header does
// not match the requested checkpoint hash, and initializes the committee chain
// once a valid bootstrap response arrives.
func TestCheckpointInit(t *testing.T) {
	chain := &TestCommitteeChain{}
	checkpoint := &types.BootstrapData{Header: types.Header{Slot: 0x2000*4 + 0x1000}} // period 4
	checkpointHash := checkpoint.Header.Hash()
	chkInit := NewCheckpointInit(chain, checkpointHash)
	ts := NewTestScheduler(t, chkInit)
	// add 2 servers
	ts.AddServer(testServer1, 1)
	ts.AddServer(testServer2, 1)

	// expect bootstrap request to server 1
	ts.Run(1, testServer1, ReqCheckpointData(checkpointHash))

	// server 1 times out; expect request to server 2
	ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
	ts.Run(2, testServer2, ReqCheckpointData(checkpointHash))

	// invalid response from server 2; expect init state to still be false
	ts.RequestEvent(request.EvResponse, ts.Request(2, 1), &types.BootstrapData{Header: types.Header{Slot: 123456}})
	ts.ExpFail(testServer2)
	ts.Run(3)
	chain.ExpInit(t, false)

	// server 1 fails (hard timeout)
	ts.RequestEvent(request.EvFail, ts.Request(1, 1), nil)
	ts.Run(4)
	chain.ExpInit(t, false)

	// server 3 is registered; expect bootstrap request to server 3
	ts.AddServer(testServer3, 1)
	ts.Run(5, testServer3, ReqCheckpointData(checkpointHash))

	// valid response from server 3; expect chain to be initialized
	ts.RequestEvent(request.EvResponse, ts.Request(5, 1), checkpoint)
	ts.Run(6)
	chain.ExpInit(t, true)
}
||||
|
||||
// TestUpdateSyncParallel checks that the forward update syncer fans out
// parallel range requests across servers (respecting per-server allowances),
// only advances the chain when responses form a contiguous sequence, re-sends
// timed out ranges through other servers, and eventually syncs to the
// announced head period.
func TestUpdateSyncParallel(t *testing.T) {
	chain := &TestCommitteeChain{}
	chain.SetNextSyncPeriod(0)
	updateSync := NewForwardUpdateSync(chain)
	ts := NewTestScheduler(t, updateSync)
	// add 2 servers, head at period 100; allow 3-3 parallel requests for each
	ts.AddServer(testServer1, 3)
	ts.ServerEvent(EvNewSignedHead, testServer1, types.SignedHeader{SignatureSlot: 0x2000*100 + 0x1000})
	ts.AddServer(testServer2, 3)
	ts.ServerEvent(EvNewSignedHead, testServer2, types.SignedHeader{SignatureSlot: 0x2000*100 + 0x1000})

	// expect 6 requests to be sent
	ts.Run(1,
		testServer1, ReqUpdates{FirstPeriod: 0, Count: 8},
		testServer1, ReqUpdates{FirstPeriod: 8, Count: 8},
		testServer1, ReqUpdates{FirstPeriod: 16, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 24, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 32, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 40, Count: 8})

	// valid response to request 1; expect 8 periods synced and a new request started
	ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(ts.Request(1, 1)))
	ts.AddAllowance(testServer1, 1)
	ts.Run(2, testServer1, ReqUpdates{FirstPeriod: 48, Count: 8})
	chain.ExpNextSyncPeriod(t, 8)

	// valid response to requests 4 and 5
	ts.RequestEvent(request.EvResponse, ts.Request(1, 4), testRespUpdate(ts.Request(1, 4)))
	ts.RequestEvent(request.EvResponse, ts.Request(1, 5), testRespUpdate(ts.Request(1, 5)))
	ts.AddAllowance(testServer2, 2)
	// expect 2 more requests but no sync progress (responses 4 and 5 cannot be added before 2 and 3)
	ts.Run(3,
		testServer2, ReqUpdates{FirstPeriod: 56, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 64, Count: 8})
	chain.ExpNextSyncPeriod(t, 8)

	// soft timeout for requests 2 and 3 (server 1 is overloaded)
	ts.RequestEvent(request.EvTimeout, ts.Request(1, 2), nil)
	ts.RequestEvent(request.EvTimeout, ts.Request(1, 3), nil)
	// no allowance, no more requests
	ts.Run(4)

	// valid response to requests 6 and 8 and 9
	ts.RequestEvent(request.EvResponse, ts.Request(1, 6), testRespUpdate(ts.Request(1, 6)))
	ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
	ts.RequestEvent(request.EvResponse, ts.Request(3, 2), testRespUpdate(ts.Request(3, 2)))
	ts.AddAllowance(testServer2, 3)
	// server 2 can now resend requests 2 and 3 (timed out by server 1) and also send a new one
	ts.Run(5,
		testServer2, ReqUpdates{FirstPeriod: 8, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 16, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 72, Count: 8})

	// server 1 finally answers timed out request 2
	ts.RequestEvent(request.EvResponse, ts.Request(1, 2), testRespUpdate(ts.Request(1, 2)))
	ts.AddAllowance(testServer1, 1)
	// expect sync progress and one new request
	ts.Run(6, testServer1, ReqUpdates{FirstPeriod: 80, Count: 8})
	chain.ExpNextSyncPeriod(t, 16)

	// server 2 answers requests 11 and 12 (resends of requests 2 and 3)
	ts.RequestEvent(request.EvResponse, ts.Request(5, 1), testRespUpdate(ts.Request(5, 1)))
	ts.RequestEvent(request.EvResponse, ts.Request(5, 2), testRespUpdate(ts.Request(5, 2)))
	ts.AddAllowance(testServer2, 2)
	ts.Run(7,
		testServer2, ReqUpdates{FirstPeriod: 88, Count: 8},
		testServer2, ReqUpdates{FirstPeriod: 96, Count: 4})
	// finally the gap is filled, update can process responses up to req6
	chain.ExpNextSyncPeriod(t, 48)

	// all remaining requests are answered
	ts.RequestEvent(request.EvResponse, ts.Request(1, 3), testRespUpdate(ts.Request(1, 3)))
	ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
	ts.RequestEvent(request.EvResponse, ts.Request(5, 3), testRespUpdate(ts.Request(5, 3)))
	ts.RequestEvent(request.EvResponse, ts.Request(6, 1), testRespUpdate(ts.Request(6, 1)))
	ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
	ts.RequestEvent(request.EvResponse, ts.Request(7, 2), testRespUpdate(ts.Request(7, 2)))
	ts.Run(8)
	// expect chain to be fully synced
	chain.ExpNextSyncPeriod(t, 100)
}
||||
|
||||
// TestUpdateSyncDifferentHeads checks server selection when servers announce
// different head periods: the syncer prefers the server covering the most
// periods, falls back on timeout, rejects a response shorter than requested,
// and picks up newly registered servers to close the remaining gap.
func TestUpdateSyncDifferentHeads(t *testing.T) {
	chain := &TestCommitteeChain{}
	chain.SetNextSyncPeriod(10)
	updateSync := NewForwardUpdateSync(chain)
	ts := NewTestScheduler(t, updateSync)
	// add 3 servers with different announced head periods
	ts.AddServer(testServer1, 1)
	ts.ServerEvent(EvNewSignedHead, testServer1, types.SignedHeader{SignatureSlot: 0x2000*15 + 0x1000})
	ts.AddServer(testServer2, 1)
	ts.ServerEvent(EvNewSignedHead, testServer2, types.SignedHeader{SignatureSlot: 0x2000*16 + 0x1000})
	ts.AddServer(testServer3, 1)
	ts.ServerEvent(EvNewSignedHead, testServer3, types.SignedHeader{SignatureSlot: 0x2000*17 + 0x1000})

	// expect request to the best announced head
	ts.Run(1, testServer3, ReqUpdates{FirstPeriod: 10, Count: 7})

	// request times out, expect request to the next best head
	ts.RequestEvent(request.EvTimeout, ts.Request(1, 1), nil)
	ts.Run(2, testServer2, ReqUpdates{FirstPeriod: 10, Count: 6})

	// request times out, expect request to the last available server
	ts.RequestEvent(request.EvTimeout, ts.Request(2, 1), nil)
	ts.Run(3, testServer1, ReqUpdates{FirstPeriod: 10, Count: 5})

	// valid response to request 3, expect chain synced to period 15
	ts.RequestEvent(request.EvResponse, ts.Request(3, 1), testRespUpdate(ts.Request(3, 1)))
	ts.AddAllowance(testServer1, 1)
	ts.Run(4)
	chain.ExpNextSyncPeriod(t, 15)

	// invalid response to request 1, server can only deliver updates up to period 15 despite announced head
	truncated := ts.Request(1, 1)
	truncated.request = ReqUpdates{FirstPeriod: 10, Count: 5}
	ts.RequestEvent(request.EvResponse, ts.Request(1, 1), testRespUpdate(truncated))
	ts.ExpFail(testServer3)
	ts.Run(5)
	// expect no progress of chain head
	chain.ExpNextSyncPeriod(t, 15)

	// valid response to request 2, expect chain synced to period 16
	ts.RequestEvent(request.EvResponse, ts.Request(2, 1), testRespUpdate(ts.Request(2, 1)))
	ts.AddAllowance(testServer2, 1)
	ts.Run(6)
	chain.ExpNextSyncPeriod(t, 16)

	// a new server is registered with announced head period 17
	ts.AddServer(testServer4, 1)
	ts.ServerEvent(EvNewSignedHead, testServer4, types.SignedHeader{SignatureSlot: 0x2000*17 + 0x1000})
	// expect request to sync one more period
	ts.Run(7, testServer4, ReqUpdates{FirstPeriod: 16, Count: 1})

	// valid response, expect chain synced to period 17
	ts.RequestEvent(request.EvResponse, ts.Request(7, 1), testRespUpdate(ts.Request(7, 1)))
	ts.AddAllowance(testServer4, 1)
	ts.Run(8)
	chain.ExpNextSyncPeriod(t, 17)
}
||||
|
||||
func testRespUpdate(request requestWithID) request.Response { |
||||
var resp RespUpdates |
||||
if request.request == nil { |
||||
return resp |
||||
} |
||||
req := request.request.(ReqUpdates) |
||||
resp.Updates = make([]*types.LightClientUpdate, int(req.Count)) |
||||
resp.Committees = make([]*types.SerializedSyncCommittee, int(req.Count)) |
||||
period := req.FirstPeriod |
||||
for i := range resp.Updates { |
||||
resp.Updates[i] = &types.LightClientUpdate{AttestedHeader: types.SignedHeader{Header: types.Header{Slot: 0x2000*period + 0x1000}}} |
||||
resp.Committees[i] = new(types.SerializedSyncCommittee) |
||||
period++ |
||||
} |
||||
return resp |
||||
} |
@ -0,0 +1,69 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/engine" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
) |
||||
|
||||
func updateEngineApi(client *rpc.Client, headCh chan types.ChainHeadEvent) { |
||||
for event := range headCh { |
||||
if client == nil { // dry run, no engine API specified
|
||||
log.Info("New execution block retrieved", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "finalized block hash", event.Finalized) |
||||
} else { |
||||
if status, err := callNewPayloadV2(client, event.HeadBlock); err == nil { |
||||
log.Info("Successful NewPayload", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "status", status) |
||||
} else { |
||||
log.Error("Failed NewPayload", "block number", event.HeadBlock.Number, "block hash", event.HeadBlock.BlockHash, "error", err) |
||||
} |
||||
if status, err := callForkchoiceUpdatedV1(client, event.HeadBlock.BlockHash, event.Finalized); err == nil { |
||||
log.Info("Successful ForkchoiceUpdated", "head", event.HeadBlock.BlockHash, "status", status) |
||||
} else { |
||||
log.Error("Failed ForkchoiceUpdated", "head", event.HeadBlock.BlockHash, "error", err) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
func callNewPayloadV2(client *rpc.Client, execData *engine.ExecutableData) (string, error) { |
||||
var resp engine.PayloadStatusV1 |
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) |
||||
err := client.CallContext(ctx, &resp, "engine_newPayloadV2", execData) |
||||
cancel() |
||||
return resp.Status, err |
||||
} |
||||
|
||||
func callForkchoiceUpdatedV1(client *rpc.Client, headHash, finalizedHash common.Hash) (string, error) { |
||||
var resp engine.ForkChoiceResponse |
||||
update := engine.ForkchoiceStateV1{ |
||||
HeadBlockHash: headHash, |
||||
SafeBlockHash: finalizedHash, |
||||
FinalizedBlockHash: finalizedHash, |
||||
} |
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) |
||||
err := client.CallContext(ctx, &resp, "engine_forkchoiceUpdatedV1", update, nil) |
||||
cancel() |
||||
return resp.PayloadStatus.Status, err |
||||
} |
@ -0,0 +1,125 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/beacon/blsync" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/cmd/utils" |
||||
"github.com/ethereum/go-ethereum/internal/flags" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/mattn/go-colorable" |
||||
"github.com/mattn/go-isatty" |
||||
"github.com/urfave/cli/v2" |
||||
) |
||||
|
||||
var (
	// verbosityFlag selects the global log level.
	verbosityFlag = &cli.IntFlag{
		Name:     "verbosity",
		Usage:    "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail",
		Value:    3,
		Category: flags.LoggingCategory,
	}
	// vmoduleFlag overrides the log level for individual modules.
	vmoduleFlag = &cli.StringFlag{
		Name:     "vmodule",
		Usage:    "Per-module verbosity: comma-separated list of <pattern>=<level> (e.g. eth/*=5,p2p=4)",
		Value:    "",
		Hidden:   true,
		Category: flags.LoggingCategory,
	}
)
||||
|
||||
// main sets up the blsync command line interface and runs the sync action.
func main() {
	app := flags.NewApp("beacon light syncer tool")
	app.Flags = []cli.Flag{
		utils.BeaconApiFlag,
		utils.BeaconApiHeaderFlag,
		utils.BeaconThresholdFlag,
		utils.BeaconNoFilterFlag,
		utils.BeaconConfigFlag,
		utils.BeaconGenesisRootFlag,
		utils.BeaconGenesisTimeFlag,
		utils.BeaconCheckpointFlag,
		//TODO datadir for optional permanent database
		utils.MainnetFlag,
		utils.SepoliaFlag,
		utils.GoerliFlag,
		utils.BlsyncApiFlag,
		utils.BlsyncJWTSecretFlag,
		verbosityFlag,
		vmoduleFlag,
	}
	app.Action = sync

	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
||||
|
||||
// sync is the CLI action: it configures logging, starts the beacon light
// client and forwards chain head events to the engine API target (or just
// logs them in dry-run mode) until the command context is done.
func sync(ctx *cli.Context) error {
	// use colored terminal output when stderr is an interactive terminal
	usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb"
	output := io.Writer(os.Stderr)
	if usecolor {
		output = colorable.NewColorable(os.Stderr)
	}
	verbosity := log.FromLegacyLevel(ctx.Int(verbosityFlag.Name))
	log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(output, verbosity, usecolor)))

	// buffered channel decouples the light client from engine API call latency
	headCh := make(chan types.ChainHeadEvent, 16)
	client := blsync.NewClient(ctx)
	sub := client.SubscribeChainHeadEvent(headCh)
	go updateEngineApi(makeRPCClient(ctx), headCh)
	client.Start()
	// run until stopped
	<-ctx.Done()
	client.Stop()
	sub.Unsubscribe()
	close(headCh) // terminates the updateEngineApi goroutine
	return nil
}
||||
|
||||
func makeRPCClient(ctx *cli.Context) *rpc.Client { |
||||
if !ctx.IsSet(utils.BlsyncApiFlag.Name) { |
||||
log.Warn("No engine API target specified, performing a dry run") |
||||
return nil |
||||
} |
||||
if !ctx.IsSet(utils.BlsyncJWTSecretFlag.Name) { |
||||
utils.Fatalf("JWT secret parameter missing") //TODO use default if datadir is specified
|
||||
} |
||||
|
||||
engineApiUrl, jwtFileName := ctx.String(utils.BlsyncApiFlag.Name), ctx.String(utils.BlsyncJWTSecretFlag.Name) |
||||
var jwtSecret [32]byte |
||||
if jwt, err := node.ObtainJWTSecret(jwtFileName); err == nil { |
||||
copy(jwtSecret[:], jwt) |
||||
} else { |
||||
utils.Fatalf("Error loading or generating JWT secret: %v", err) |
||||
} |
||||
auth := node.NewJWTAuth(jwtSecret) |
||||
cl, err := rpc.DialOptions(context.Background(), engineApiUrl, rpc.WithHTTPAuth(auth)) |
||||
if err != nil { |
||||
utils.Fatalf("Could not create RPC client: %v", err) |
||||
} |
||||
return cl |
||||
} |
@ -0,0 +1,88 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package catalyst |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/beacon/engine" |
||||
"github.com/ethereum/go-ethereum/beacon/types" |
||||
"github.com/ethereum/go-ethereum/eth" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
// Blsync tracks the head of the beacon chain through the beacon light client
// and drives the local node via ConsensusAPI.
type Blsync struct {
	engine  *ConsensusAPI             // engine API wrapper driving the local node
	client  Client                    // beacon light client delivering head events
	headCh  chan types.ChainHeadEvent // receives new chain heads from the client
	headSub event.Subscription        // subscription feeding headCh

	quitCh chan struct{} // closed by Stop to terminate the Start loop
}
||||
|
||||
// Client is the interface required from the beacon light client: it delivers
// chain head events through a subscription and can be started and stopped.
type Client interface {
	SubscribeChainHeadEvent(ch chan<- types.ChainHeadEvent) event.Subscription
	Start()
	Stop()
}
||||
|
||||
// NewBlsync creates a new beacon light syncer.
func NewBlsync(client Client, eth *eth.Ethereum) *Blsync {
	return &Blsync{
		engine: newConsensusAPIWithoutHeartbeat(eth),
		client: client,
		// buffered so the light client is not blocked by slow engine calls
		headCh: make(chan types.ChainHeadEvent, 16),
		quitCh: make(chan struct{}),
	}
}
||||
|
||||
// Start starts underlying beacon light client and the sync logic for driving
// the local node.
//
// It blocks until Stop is called, forwarding every received chain head to the
// local engine as a NewPayloadV2 call followed by a ForkchoiceUpdatedV1 call.
func (b *Blsync) Start() error {
	log.Info("Beacon light sync started")
	b.headSub = b.client.SubscribeChainHeadEvent(b.headCh)
	go b.client.Start()

	for {
		select {
		case <-b.quitCh:
			return nil
		case head := <-b.headCh:
			// deliver the payload before moving forkchoice to the new head
			if _, err := b.engine.NewPayloadV2(*head.HeadBlock); err != nil {
				log.Error("failed to send new payload", "err", err)
				continue
			}
			update := engine.ForkchoiceStateV1{
				HeadBlockHash:      head.HeadBlock.BlockHash,
				SafeBlockHash:      head.Finalized, //TODO pass finalized or empty hash here?
				FinalizedBlockHash: head.Finalized,
			}
			if _, err := b.engine.ForkchoiceUpdatedV1(update, nil); err != nil {
				log.Error("failed to send forkchoice updated", "err", err)
				continue
			}
		}
	}
}
||||
|
||||
// Stop signals to the light client and syncer to exit.
func (b *Blsync) Stop() error {
	b.client.Stop()
	close(b.quitCh) // unblocks the Start loop
	return nil
}
Loading…
Reference in new issue