mirror of https://github.com/ethereum/go-ethereum
parent
4aee0d1994
commit
2ed729d38e
@ -0,0 +1,401 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package les |
||||||
|
|
||||||
|
import ( |
||||||
|
"math/big" |
||||||
|
"sync" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/common/mclock" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/eth/downloader" |
||||||
|
"github.com/ethereum/go-ethereum/light" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/p2p" |
||||||
|
"github.com/ethereum/go-ethereum/params" |
||||||
|
) |
||||||
|
|
||||||
|
// clientHandler is responsible for receiving and processing all incoming server
|
||||||
|
// responses.
|
||||||
|
type clientHandler struct { |
||||||
|
ulc *ulc |
||||||
|
checkpoint *params.TrustedCheckpoint |
||||||
|
fetcher *lightFetcher |
||||||
|
downloader *downloader.Downloader |
||||||
|
backend *LightEthereum |
||||||
|
|
||||||
|
closeCh chan struct{} |
||||||
|
wg sync.WaitGroup // WaitGroup used to track all connected peers.
|
||||||
|
} |
||||||
|
|
||||||
|
func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.TrustedCheckpoint, backend *LightEthereum) *clientHandler { |
||||||
|
handler := &clientHandler{ |
||||||
|
backend: backend, |
||||||
|
closeCh: make(chan struct{}), |
||||||
|
} |
||||||
|
if ulcServers != nil { |
||||||
|
ulc, err := newULC(ulcServers, ulcFraction) |
||||||
|
if err != nil { |
||||||
|
log.Error("Failed to initialize ultra light client") |
||||||
|
} |
||||||
|
handler.ulc = ulc |
||||||
|
log.Info("Enable ultra light client mode") |
||||||
|
} |
||||||
|
var height uint64 |
||||||
|
if checkpoint != nil { |
||||||
|
height = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1 |
||||||
|
} |
||||||
|
handler.fetcher = newLightFetcher(handler) |
||||||
|
handler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer) |
||||||
|
handler.backend.peers.notify((*downloaderPeerNotify)(handler)) |
||||||
|
return handler |
||||||
|
} |
||||||
|
|
||||||
|
func (h *clientHandler) stop() { |
||||||
|
close(h.closeCh) |
||||||
|
h.downloader.Terminate() |
||||||
|
h.fetcher.close() |
||||||
|
h.wg.Wait() |
||||||
|
} |
||||||
|
|
||||||
|
// runPeer is the p2p protocol run function for the given version.
|
||||||
|
func (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error { |
||||||
|
trusted := false |
||||||
|
if h.ulc != nil { |
||||||
|
trusted = h.ulc.trusted(p.ID()) |
||||||
|
} |
||||||
|
peer := newPeer(int(version), h.backend.config.NetworkId, trusted, p, newMeteredMsgWriter(rw, int(version))) |
||||||
|
peer.poolEntry = h.backend.serverPool.connect(peer, peer.Node()) |
||||||
|
if peer.poolEntry == nil { |
||||||
|
return p2p.DiscRequested |
||||||
|
} |
||||||
|
h.wg.Add(1) |
||||||
|
defer h.wg.Done() |
||||||
|
err := h.handle(peer) |
||||||
|
h.backend.serverPool.disconnect(peer.poolEntry) |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
func (h *clientHandler) handle(p *peer) error { |
||||||
|
if h.backend.peers.Len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted { |
||||||
|
return p2p.DiscTooManyPeers |
||||||
|
} |
||||||
|
p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) |
||||||
|
|
||||||
|
// Execute the LES handshake
|
||||||
|
var ( |
||||||
|
head = h.backend.blockchain.CurrentHeader() |
||||||
|
hash = head.Hash() |
||||||
|
number = head.Number.Uint64() |
||||||
|
td = h.backend.blockchain.GetTd(hash, number) |
||||||
|
) |
||||||
|
if err := p.Handshake(td, hash, number, h.backend.blockchain.Genesis().Hash(), nil); err != nil { |
||||||
|
p.Log().Debug("Light Ethereum handshake failed", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
// Register the peer locally
|
||||||
|
if err := h.backend.peers.Register(p); err != nil { |
||||||
|
p.Log().Error("Light Ethereum peer registration failed", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
serverConnectionGauge.Update(int64(h.backend.peers.Len())) |
||||||
|
|
||||||
|
connectedAt := mclock.Now() |
||||||
|
defer func() { |
||||||
|
h.backend.peers.Unregister(p.id) |
||||||
|
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) |
||||||
|
serverConnectionGauge.Update(int64(h.backend.peers.Len())) |
||||||
|
}() |
||||||
|
|
||||||
|
h.fetcher.announce(p, p.headInfo) |
||||||
|
|
||||||
|
// pool entry can be nil during the unit test.
|
||||||
|
if p.poolEntry != nil { |
||||||
|
h.backend.serverPool.registered(p.poolEntry) |
||||||
|
} |
||||||
|
// Spawn a main loop to handle all incoming messages.
|
||||||
|
for { |
||||||
|
if err := h.handleMsg(p); err != nil { |
||||||
|
p.Log().Debug("Light Ethereum message handling failed", "err", err) |
||||||
|
p.fcServer.DumpLogs() |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// handleMsg is invoked whenever an inbound message is received from a remote
|
||||||
|
// peer. The remote connection is torn down upon returning any error.
|
||||||
|
func (h *clientHandler) handleMsg(p *peer) error { |
||||||
|
// Read the next message from the remote peer, and ensure it's fully consumed
|
||||||
|
msg, err := p.rw.ReadMsg() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size) |
||||||
|
|
||||||
|
if msg.Size > ProtocolMaxMsgSize { |
||||||
|
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) |
||||||
|
} |
||||||
|
defer msg.Discard() |
||||||
|
|
||||||
|
var deliverMsg *Msg |
||||||
|
|
||||||
|
// Handle the message depending on its contents
|
||||||
|
switch msg.Code { |
||||||
|
case AnnounceMsg: |
||||||
|
p.Log().Trace("Received announce message") |
||||||
|
var req announceData |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
return errResp(ErrDecode, "%v: %v", msg, err) |
||||||
|
} |
||||||
|
if err := req.sanityCheck(); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
update, size := req.Update.decode() |
||||||
|
if p.rejectUpdate(size) { |
||||||
|
return errResp(ErrRequestRejected, "") |
||||||
|
} |
||||||
|
p.updateFlowControl(update) |
||||||
|
|
||||||
|
if req.Hash != (common.Hash{}) { |
||||||
|
if p.announceType == announceTypeNone { |
||||||
|
return errResp(ErrUnexpectedResponse, "") |
||||||
|
} |
||||||
|
if p.announceType == announceTypeSigned { |
||||||
|
if err := req.checkSignature(p.ID(), update); err != nil { |
||||||
|
p.Log().Trace("Invalid announcement signature", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
p.Log().Trace("Valid announcement signature") |
||||||
|
} |
||||||
|
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth) |
||||||
|
h.fetcher.announce(p, &req) |
||||||
|
} |
||||||
|
case BlockHeadersMsg: |
||||||
|
p.Log().Trace("Received block header response message") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Headers []*types.Header |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
if h.fetcher.requestedID(resp.ReqID) { |
||||||
|
h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) |
||||||
|
} else { |
||||||
|
if err := h.downloader.DeliverHeaders(p.id, resp.Headers); err != nil { |
||||||
|
log.Debug("Failed to deliver headers", "err", err) |
||||||
|
} |
||||||
|
} |
||||||
|
case BlockBodiesMsg: |
||||||
|
p.Log().Trace("Received block bodies response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Data []*types.Body |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgBlockBodies, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Data, |
||||||
|
} |
||||||
|
case CodeMsg: |
||||||
|
p.Log().Trace("Received code response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Data [][]byte |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgCode, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Data, |
||||||
|
} |
||||||
|
case ReceiptsMsg: |
||||||
|
p.Log().Trace("Received receipts response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Receipts []types.Receipts |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgReceipts, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Receipts, |
||||||
|
} |
||||||
|
case ProofsV2Msg: |
||||||
|
p.Log().Trace("Received les/2 proofs response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Data light.NodeList |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgProofsV2, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Data, |
||||||
|
} |
||||||
|
case HelperTrieProofsMsg: |
||||||
|
p.Log().Trace("Received helper trie proof response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Data HelperTrieResps |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgHelperTrieProofs, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Data, |
||||||
|
} |
||||||
|
case TxStatusMsg: |
||||||
|
p.Log().Trace("Received tx status response") |
||||||
|
var resp struct { |
||||||
|
ReqID, BV uint64 |
||||||
|
Status []light.TxStatus |
||||||
|
} |
||||||
|
if err := msg.Decode(&resp); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ReceivedReply(resp.ReqID, resp.BV) |
||||||
|
deliverMsg = &Msg{ |
||||||
|
MsgType: MsgTxStatus, |
||||||
|
ReqID: resp.ReqID, |
||||||
|
Obj: resp.Status, |
||||||
|
} |
||||||
|
case StopMsg: |
||||||
|
p.freezeServer(true) |
||||||
|
h.backend.retriever.frozen(p) |
||||||
|
p.Log().Debug("Service stopped") |
||||||
|
case ResumeMsg: |
||||||
|
var bv uint64 |
||||||
|
if err := msg.Decode(&bv); err != nil { |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
p.fcServer.ResumeFreeze(bv) |
||||||
|
p.freezeServer(false) |
||||||
|
p.Log().Debug("Service resumed") |
||||||
|
default: |
||||||
|
p.Log().Trace("Received invalid message", "code", msg.Code) |
||||||
|
return errResp(ErrInvalidMsgCode, "%v", msg.Code) |
||||||
|
} |
||||||
|
// Deliver the received response to retriever.
|
||||||
|
if deliverMsg != nil { |
||||||
|
if err := h.backend.retriever.deliver(p, deliverMsg); err != nil { |
||||||
|
p.responseErrors++ |
||||||
|
if p.responseErrors > maxResponseErrors { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
func (h *clientHandler) removePeer(id string) { |
||||||
|
h.backend.peers.Unregister(id) |
||||||
|
} |
||||||
|
|
||||||
|
type peerConnection struct { |
||||||
|
handler *clientHandler |
||||||
|
peer *peer |
||||||
|
} |
||||||
|
|
||||||
|
func (pc *peerConnection) Head() (common.Hash, *big.Int) { |
||||||
|
return pc.peer.HeadAndTd() |
||||||
|
} |
||||||
|
|
||||||
|
func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { |
||||||
|
rq := &distReq{ |
||||||
|
getCost: func(dp distPeer) uint64 { |
||||||
|
peer := dp.(*peer) |
||||||
|
return peer.GetRequestCost(GetBlockHeadersMsg, amount) |
||||||
|
}, |
||||||
|
canSend: func(dp distPeer) bool { |
||||||
|
return dp.(*peer) == pc.peer |
||||||
|
}, |
||||||
|
request: func(dp distPeer) func() { |
||||||
|
reqID := genReqID() |
||||||
|
peer := dp.(*peer) |
||||||
|
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount) |
||||||
|
peer.fcServer.QueuedRequest(reqID, cost) |
||||||
|
return func() { peer.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) } |
||||||
|
}, |
||||||
|
} |
||||||
|
_, ok := <-pc.handler.backend.reqDist.queue(rq) |
||||||
|
if !ok { |
||||||
|
return light.ErrNoPeers |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { |
||||||
|
rq := &distReq{ |
||||||
|
getCost: func(dp distPeer) uint64 { |
||||||
|
peer := dp.(*peer) |
||||||
|
return peer.GetRequestCost(GetBlockHeadersMsg, amount) |
||||||
|
}, |
||||||
|
canSend: func(dp distPeer) bool { |
||||||
|
return dp.(*peer) == pc.peer |
||||||
|
}, |
||||||
|
request: func(dp distPeer) func() { |
||||||
|
reqID := genReqID() |
||||||
|
peer := dp.(*peer) |
||||||
|
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount) |
||||||
|
peer.fcServer.QueuedRequest(reqID, cost) |
||||||
|
return func() { peer.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) } |
||||||
|
}, |
||||||
|
} |
||||||
|
_, ok := <-pc.handler.backend.reqDist.queue(rq) |
||||||
|
if !ok { |
||||||
|
return light.ErrNoPeers |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// downloaderPeerNotify implements peerSetNotify
|
||||||
|
type downloaderPeerNotify clientHandler |
||||||
|
|
||||||
|
func (d *downloaderPeerNotify) registerPeer(p *peer) { |
||||||
|
h := (*clientHandler)(d) |
||||||
|
pc := &peerConnection{ |
||||||
|
handler: h, |
||||||
|
peer: p, |
||||||
|
} |
||||||
|
h.downloader.RegisterLightPeer(p.id, ethVersion, pc) |
||||||
|
} |
||||||
|
|
||||||
|
func (d *downloaderPeerNotify) unregisterPeer(p *peer) { |
||||||
|
h := (*clientHandler)(d) |
||||||
|
h.downloader.UnregisterPeer(p.id) |
||||||
|
} |
@ -1,168 +0,0 @@ |
|||||||
// Copyright 2019 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package les |
|
||||||
|
|
||||||
import ( |
|
||||||
"math/big" |
|
||||||
"testing" |
|
||||||
|
|
||||||
"net" |
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common" |
|
||||||
"github.com/ethereum/go-ethereum/core/types" |
|
||||||
"github.com/ethereum/go-ethereum/crypto" |
|
||||||
"github.com/ethereum/go-ethereum/p2p" |
|
||||||
"github.com/ethereum/go-ethereum/p2p/enode" |
|
||||||
) |
|
||||||
|
|
||||||
func TestFetcherULCPeerSelector(t *testing.T) { |
|
||||||
id1 := newNodeID(t).ID() |
|
||||||
id2 := newNodeID(t).ID() |
|
||||||
id3 := newNodeID(t).ID() |
|
||||||
id4 := newNodeID(t).ID() |
|
||||||
|
|
||||||
ftn1 := &fetcherTreeNode{ |
|
||||||
hash: common.HexToHash("1"), |
|
||||||
td: big.NewInt(1), |
|
||||||
} |
|
||||||
ftn2 := &fetcherTreeNode{ |
|
||||||
hash: common.HexToHash("2"), |
|
||||||
td: big.NewInt(2), |
|
||||||
parent: ftn1, |
|
||||||
} |
|
||||||
ftn3 := &fetcherTreeNode{ |
|
||||||
hash: common.HexToHash("3"), |
|
||||||
td: big.NewInt(3), |
|
||||||
parent: ftn2, |
|
||||||
} |
|
||||||
lf := lightFetcher{ |
|
||||||
pm: &ProtocolManager{ |
|
||||||
ulc: &ulc{ |
|
||||||
keys: map[string]bool{ |
|
||||||
id1.String(): true, |
|
||||||
id2.String(): true, |
|
||||||
id3.String(): true, |
|
||||||
id4.String(): true, |
|
||||||
}, |
|
||||||
fraction: 70, |
|
||||||
}, |
|
||||||
}, |
|
||||||
maxConfirmedTd: ftn1.td, |
|
||||||
|
|
||||||
peers: map[*peer]*fetcherPeerInfo{ |
|
||||||
{ |
|
||||||
id: "peer1", |
|
||||||
Peer: p2p.NewPeer(id1, "peer1", []p2p.Cap{}), |
|
||||||
trusted: true, |
|
||||||
}: { |
|
||||||
nodeByHash: map[common.Hash]*fetcherTreeNode{ |
|
||||||
ftn1.hash: ftn1, |
|
||||||
ftn2.hash: ftn2, |
|
||||||
}, |
|
||||||
}, |
|
||||||
{ |
|
||||||
Peer: p2p.NewPeer(id2, "peer2", []p2p.Cap{}), |
|
||||||
id: "peer2", |
|
||||||
trusted: true, |
|
||||||
}: { |
|
||||||
nodeByHash: map[common.Hash]*fetcherTreeNode{ |
|
||||||
ftn1.hash: ftn1, |
|
||||||
ftn2.hash: ftn2, |
|
||||||
}, |
|
||||||
}, |
|
||||||
{ |
|
||||||
id: "peer3", |
|
||||||
Peer: p2p.NewPeer(id3, "peer3", []p2p.Cap{}), |
|
||||||
trusted: true, |
|
||||||
}: { |
|
||||||
nodeByHash: map[common.Hash]*fetcherTreeNode{ |
|
||||||
ftn1.hash: ftn1, |
|
||||||
ftn2.hash: ftn2, |
|
||||||
ftn3.hash: ftn3, |
|
||||||
}, |
|
||||||
}, |
|
||||||
{ |
|
||||||
id: "peer4", |
|
||||||
Peer: p2p.NewPeer(id4, "peer4", []p2p.Cap{}), |
|
||||||
trusted: true, |
|
||||||
}: { |
|
||||||
nodeByHash: map[common.Hash]*fetcherTreeNode{ |
|
||||||
ftn1.hash: ftn1, |
|
||||||
}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
chain: &lightChainStub{ |
|
||||||
tds: map[common.Hash]*big.Int{}, |
|
||||||
headers: map[common.Hash]*types.Header{ |
|
||||||
ftn1.hash: {}, |
|
||||||
ftn2.hash: {}, |
|
||||||
ftn3.hash: {}, |
|
||||||
}, |
|
||||||
}, |
|
||||||
} |
|
||||||
bestHash, bestAmount, bestTD, sync := lf.findBestRequest() |
|
||||||
|
|
||||||
if bestTD == nil { |
|
||||||
t.Fatal("Empty result") |
|
||||||
} |
|
||||||
|
|
||||||
if bestTD.Cmp(ftn2.td) != 0 { |
|
||||||
t.Fatal("bad td", bestTD) |
|
||||||
} |
|
||||||
if bestHash != ftn2.hash { |
|
||||||
t.Fatal("bad hash", bestTD) |
|
||||||
} |
|
||||||
|
|
||||||
_, _ = bestAmount, sync |
|
||||||
} |
|
||||||
|
|
||||||
type lightChainStub struct { |
|
||||||
BlockChain |
|
||||||
tds map[common.Hash]*big.Int |
|
||||||
headers map[common.Hash]*types.Header |
|
||||||
insertHeaderChainAssertFunc func(chain []*types.Header, checkFreq int) (int, error) |
|
||||||
} |
|
||||||
|
|
||||||
func (l *lightChainStub) GetHeader(hash common.Hash, number uint64) *types.Header { |
|
||||||
if h, ok := l.headers[hash]; ok { |
|
||||||
return h |
|
||||||
} |
|
||||||
|
|
||||||
return nil |
|
||||||
} |
|
||||||
|
|
||||||
func (l *lightChainStub) LockChain() {} |
|
||||||
func (l *lightChainStub) UnlockChain() {} |
|
||||||
|
|
||||||
func (l *lightChainStub) GetTd(hash common.Hash, number uint64) *big.Int { |
|
||||||
if td, ok := l.tds[hash]; ok { |
|
||||||
return td |
|
||||||
} |
|
||||||
return nil |
|
||||||
} |
|
||||||
|
|
||||||
func (l *lightChainStub) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) { |
|
||||||
return l.insertHeaderChainAssertFunc(chain, checkFreq) |
|
||||||
} |
|
||||||
|
|
||||||
func newNodeID(t *testing.T) *enode.Node { |
|
||||||
key, err := crypto.GenerateKey() |
|
||||||
if err != nil { |
|
||||||
t.Fatal("generate key err:", err) |
|
||||||
} |
|
||||||
return enode.NewV4(&key.PublicKey, net.IP{}, 35000, 35000) |
|
||||||
} |
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,921 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package les |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/binary" |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"sync" |
||||||
|
"sync/atomic" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/common/mclock" |
||||||
|
"github.com/ethereum/go-ethereum/core" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/core/state" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/light" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/metrics" |
||||||
|
"github.com/ethereum/go-ethereum/p2p" |
||||||
|
"github.com/ethereum/go-ethereum/rlp" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
const (
	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
	ethVersion        = 63              // Equivalent eth version for the downloader

	MaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request
	MaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request
	MaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request
	MaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request
	MaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request
	MaxTxSend                = 64  // Amount of transactions to be send per request
	MaxTxStatus              = 256 // Amount of transactions to queried per request
)

// errTooManyInvalidRequest is returned when a client keeps sending requests
// that fail validation.
var errTooManyInvalidRequest = errors.New("too many invalid requests made")
// serverHandler is responsible for serving light client and process
|
||||||
|
// all incoming light requests.
|
||||||
|
type serverHandler struct { |
||||||
|
blockchain *core.BlockChain |
||||||
|
chainDb ethdb.Database |
||||||
|
txpool *core.TxPool |
||||||
|
server *LesServer |
||||||
|
|
||||||
|
closeCh chan struct{} // Channel used to exit all background routines of handler.
|
||||||
|
wg sync.WaitGroup // WaitGroup used to track all background routines of handler.
|
||||||
|
synced func() bool // Callback function used to determine whether local node is synced.
|
||||||
|
|
||||||
|
// Testing fields
|
||||||
|
addTxsSync bool |
||||||
|
} |
||||||
|
|
||||||
|
func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler { |
||||||
|
handler := &serverHandler{ |
||||||
|
server: server, |
||||||
|
blockchain: blockchain, |
||||||
|
chainDb: chainDb, |
||||||
|
txpool: txpool, |
||||||
|
closeCh: make(chan struct{}), |
||||||
|
synced: synced, |
||||||
|
} |
||||||
|
return handler |
||||||
|
} |
||||||
|
|
||||||
|
// start starts the server handler.
|
||||||
|
func (h *serverHandler) start() { |
||||||
|
h.wg.Add(1) |
||||||
|
go h.broadcastHeaders() |
||||||
|
} |
||||||
|
|
||||||
|
// stop stops the server handler.
|
||||||
|
func (h *serverHandler) stop() { |
||||||
|
close(h.closeCh) |
||||||
|
h.wg.Wait() |
||||||
|
} |
||||||
|
|
||||||
|
// runPeer is the p2p protocol run function for the given version.
|
||||||
|
func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error { |
||||||
|
peer := newPeer(int(version), h.server.config.NetworkId, false, p, newMeteredMsgWriter(rw, int(version))) |
||||||
|
h.wg.Add(1) |
||||||
|
defer h.wg.Done() |
||||||
|
return h.handle(peer) |
||||||
|
} |
||||||
|
|
||||||
|
func (h *serverHandler) handle(p *peer) error { |
||||||
|
// Reject light clients if server is not synced.
|
||||||
|
if !h.synced() { |
||||||
|
return p2p.DiscRequested |
||||||
|
} |
||||||
|
p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) |
||||||
|
|
||||||
|
// Execute the LES handshake
|
||||||
|
var ( |
||||||
|
head = h.blockchain.CurrentHeader() |
||||||
|
hash = head.Hash() |
||||||
|
number = head.Number.Uint64() |
||||||
|
td = h.blockchain.GetTd(hash, number) |
||||||
|
) |
||||||
|
if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), h.server); err != nil { |
||||||
|
p.Log().Debug("Light Ethereum handshake failed", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
defer p.fcClient.Disconnect() |
||||||
|
|
||||||
|
// Register the peer locally
|
||||||
|
if err := h.server.peers.Register(p); err != nil { |
||||||
|
p.Log().Error("Light Ethereum peer registration failed", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
clientConnectionGauge.Update(int64(h.server.peers.Len())) |
||||||
|
|
||||||
|
// add dummy balance tracker for tests
|
||||||
|
if p.balanceTracker == nil { |
||||||
|
p.balanceTracker = &balanceTracker{} |
||||||
|
p.balanceTracker.init(&mclock.System{}, 1) |
||||||
|
} |
||||||
|
|
||||||
|
connectedAt := mclock.Now() |
||||||
|
defer func() { |
||||||
|
p.balanceTracker = nil |
||||||
|
h.server.peers.Unregister(p.id) |
||||||
|
clientConnectionGauge.Update(int64(h.server.peers.Len())) |
||||||
|
connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) |
||||||
|
}() |
||||||
|
|
||||||
|
// Spawn a main loop to handle all incoming messages.
|
||||||
|
for { |
||||||
|
select { |
||||||
|
case err := <-p.errCh: |
||||||
|
p.Log().Debug("Failed to send light ethereum response", "err", err) |
||||||
|
return err |
||||||
|
default: |
||||||
|
} |
||||||
|
if err := h.handleMsg(p); err != nil { |
||||||
|
p.Log().Debug("Light Ethereum message handling failed", "err", err) |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// handleMsg is invoked whenever an inbound message is received from a remote
|
||||||
|
// peer. The remote connection is torn down upon returning any error.
|
||||||
|
func (h *serverHandler) handleMsg(p *peer) error { |
||||||
|
// Read the next message from the remote peer, and ensure it's fully consumed
|
||||||
|
msg, err := p.rw.ReadMsg() |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size) |
||||||
|
|
||||||
|
// Discard large message which exceeds the limitation.
|
||||||
|
if msg.Size > ProtocolMaxMsgSize { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) |
||||||
|
} |
||||||
|
defer msg.Discard() |
||||||
|
|
||||||
|
var ( |
||||||
|
maxCost uint64 |
||||||
|
task *servingTask |
||||||
|
) |
||||||
|
p.responseCount++ |
||||||
|
responseCount := p.responseCount |
||||||
|
// accept returns an indicator whether the request can be served.
|
||||||
|
// If so, deduct the max cost from the flow control buffer.
|
||||||
|
accept := func(reqID, reqCnt, maxCnt uint64) bool { |
||||||
|
// Short circuit if the peer is already frozen or the request is invalid.
|
||||||
|
inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0) |
||||||
|
if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt { |
||||||
|
p.fcClient.OneTimeCost(inSizeCost) |
||||||
|
return false |
||||||
|
} |
||||||
|
// Prepaid max cost units before request been serving.
|
||||||
|
maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt) |
||||||
|
accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost) |
||||||
|
if !accepted { |
||||||
|
p.freezeClient() |
||||||
|
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge))) |
||||||
|
p.fcClient.OneTimeCost(inSizeCost) |
||||||
|
return false |
||||||
|
} |
||||||
|
// Create a multi-stage task, estimate the time it takes for the task to
|
||||||
|
// execute, and cache it in the request service queue.
|
||||||
|
factor := h.server.costTracker.globalFactor() |
||||||
|
if factor < 0.001 { |
||||||
|
factor = 1 |
||||||
|
p.Log().Error("Invalid global cost factor", "factor", factor) |
||||||
|
} |
||||||
|
maxTime := uint64(float64(maxCost) / factor) |
||||||
|
task = h.server.servingQueue.newTask(p, maxTime, priority) |
||||||
|
if task.start() { |
||||||
|
return true |
||||||
|
} |
||||||
|
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost) |
||||||
|
return false |
||||||
|
} |
||||||
|
// sendResponse sends back the response and updates the flow control statistic.
|
||||||
|
sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) { |
||||||
|
p.responseLock.Lock() |
||||||
|
defer p.responseLock.Unlock() |
||||||
|
|
||||||
|
// Short circuit if the client is already frozen.
|
||||||
|
if p.isFrozen() { |
||||||
|
realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0) |
||||||
|
p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) |
||||||
|
return |
||||||
|
} |
||||||
|
// Positive correction buffer value with real cost.
|
||||||
|
var replySize uint32 |
||||||
|
if reply != nil { |
||||||
|
replySize = reply.size() |
||||||
|
} |
||||||
|
var realCost uint64 |
||||||
|
if h.server.costTracker.testing { |
||||||
|
realCost = maxCost // Assign a fake cost for testing purpose
|
||||||
|
} else { |
||||||
|
realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize) |
||||||
|
} |
||||||
|
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) |
||||||
|
if amount != 0 { |
||||||
|
// Feed cost tracker request serving statistic.
|
||||||
|
h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost) |
||||||
|
// Reduce priority "balance" for the specific peer.
|
||||||
|
p.balanceTracker.requestCost(realCost) |
||||||
|
} |
||||||
|
if reply != nil { |
||||||
|
p.queueSend(func() { |
||||||
|
if err := reply.send(bv); err != nil { |
||||||
|
select { |
||||||
|
case p.errCh <- err: |
||||||
|
default: |
||||||
|
} |
||||||
|
} |
||||||
|
}) |
||||||
|
} |
||||||
|
} |
||||||
|
switch msg.Code { |
||||||
|
case GetBlockHeadersMsg: |
||||||
|
p.Log().Trace("Received block header request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInHeaderPacketsMeter.Mark(1) |
||||||
|
miscInHeaderTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Query getBlockHeadersData |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "%v: %v", msg, err) |
||||||
|
} |
||||||
|
query := req.Query |
||||||
|
if accept(req.ReqID, query.Amount, MaxHeaderFetch) { |
||||||
|
go func() { |
||||||
|
hashMode := query.Origin.Hash != (common.Hash{}) |
||||||
|
first := true |
||||||
|
maxNonCanonical := uint64(100) |
||||||
|
|
||||||
|
// Gather headers until the fetch or network limits is reached
|
||||||
|
var ( |
||||||
|
bytes common.StorageSize |
||||||
|
headers []*types.Header |
||||||
|
unknown bool |
||||||
|
) |
||||||
|
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit { |
||||||
|
if !first && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
// Retrieve the next header satisfying the query
|
||||||
|
var origin *types.Header |
||||||
|
if hashMode { |
||||||
|
if first { |
||||||
|
origin = h.blockchain.GetHeaderByHash(query.Origin.Hash) |
||||||
|
if origin != nil { |
||||||
|
query.Origin.Number = origin.Number.Uint64() |
||||||
|
} |
||||||
|
} else { |
||||||
|
origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number) |
||||||
|
} |
||||||
|
} else { |
||||||
|
origin = h.blockchain.GetHeaderByNumber(query.Origin.Number) |
||||||
|
} |
||||||
|
if origin == nil { |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
break |
||||||
|
} |
||||||
|
headers = append(headers, origin) |
||||||
|
bytes += estHeaderRlpSize |
||||||
|
|
||||||
|
// Advance to the next header of the query
|
||||||
|
switch { |
||||||
|
case hashMode && query.Reverse: |
||||||
|
// Hash based traversal towards the genesis block
|
||||||
|
ancestor := query.Skip + 1 |
||||||
|
if ancestor == 0 { |
||||||
|
unknown = true |
||||||
|
} else { |
||||||
|
query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) |
||||||
|
unknown = query.Origin.Hash == common.Hash{} |
||||||
|
} |
||||||
|
case hashMode && !query.Reverse: |
||||||
|
// Hash based traversal towards the leaf block
|
||||||
|
var ( |
||||||
|
current = origin.Number.Uint64() |
||||||
|
next = current + query.Skip + 1 |
||||||
|
) |
||||||
|
if next <= current { |
||||||
|
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") |
||||||
|
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) |
||||||
|
unknown = true |
||||||
|
} else { |
||||||
|
if header := h.blockchain.GetHeaderByNumber(next); header != nil { |
||||||
|
nextHash := header.Hash() |
||||||
|
expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) |
||||||
|
if expOldHash == query.Origin.Hash { |
||||||
|
query.Origin.Hash, query.Origin.Number = nextHash, next |
||||||
|
} else { |
||||||
|
unknown = true |
||||||
|
} |
||||||
|
} else { |
||||||
|
unknown = true |
||||||
|
} |
||||||
|
} |
||||||
|
case query.Reverse: |
||||||
|
// Number based traversal towards the genesis block
|
||||||
|
if query.Origin.Number >= query.Skip+1 { |
||||||
|
query.Origin.Number -= query.Skip + 1 |
||||||
|
} else { |
||||||
|
unknown = true |
||||||
|
} |
||||||
|
|
||||||
|
case !query.Reverse: |
||||||
|
// Number based traversal towards the leaf block
|
||||||
|
query.Origin.Number += query.Skip + 1 |
||||||
|
} |
||||||
|
first = false |
||||||
|
} |
||||||
|
reply := p.ReplyBlockHeaders(req.ReqID, headers) |
||||||
|
sendResponse(req.ReqID, query.Amount, p.ReplyBlockHeaders(req.ReqID, headers), task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutHeaderPacketsMeter.Mark(1) |
||||||
|
miscOutHeaderTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetBlockBodiesMsg: |
||||||
|
p.Log().Trace("Received block bodies request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInBodyPacketsMeter.Mark(1) |
||||||
|
miscInBodyTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Hashes []common.Hash |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
var ( |
||||||
|
bytes int |
||||||
|
bodies []rlp.RawValue |
||||||
|
) |
||||||
|
reqCnt := len(req.Hashes) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) { |
||||||
|
go func() { |
||||||
|
for i, hash := range req.Hashes { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
if bytes >= softResponseLimit { |
||||||
|
break |
||||||
|
} |
||||||
|
body := h.blockchain.GetBodyRLP(hash) |
||||||
|
if body == nil { |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
bodies = append(bodies, body) |
||||||
|
bytes += len(body) |
||||||
|
} |
||||||
|
reply := p.ReplyBlockBodiesRLP(req.ReqID, bodies) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutBodyPacketsMeter.Mark(1) |
||||||
|
miscOutBodyTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetCodeMsg: |
||||||
|
p.Log().Trace("Received code request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInCodePacketsMeter.Mark(1) |
||||||
|
miscInCodeTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Reqs []CodeReq |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
var ( |
||||||
|
bytes int |
||||||
|
data [][]byte |
||||||
|
) |
||||||
|
reqCnt := len(req.Reqs) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) { |
||||||
|
go func() { |
||||||
|
for i, request := range req.Reqs { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
// Look up the root hash belonging to the request
|
||||||
|
header := h.blockchain.GetHeaderByHash(request.BHash) |
||||||
|
if header == nil { |
||||||
|
p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Refuse to search stale state data in the database since looking for
|
||||||
|
// a non-exist key is kind of expensive.
|
||||||
|
local := h.blockchain.CurrentHeader().Number.Uint64() |
||||||
|
if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { |
||||||
|
p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
triedb := h.blockchain.StateCache().TrieDB() |
||||||
|
|
||||||
|
account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey)) |
||||||
|
if err != nil { |
||||||
|
p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
code, err := triedb.Node(common.BytesToHash(account.CodeHash)) |
||||||
|
if err != nil { |
||||||
|
p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Accumulate the code and abort if enough data was retrieved
|
||||||
|
data = append(data, code) |
||||||
|
if bytes += len(code); bytes >= softResponseLimit { |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
reply := p.ReplyCode(req.ReqID, data) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutCodePacketsMeter.Mark(1) |
||||||
|
miscOutCodeTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetReceiptsMsg: |
||||||
|
p.Log().Trace("Received receipts request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInReceiptPacketsMeter.Mark(1) |
||||||
|
miscInReceiptTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Hashes []common.Hash |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
var ( |
||||||
|
bytes int |
||||||
|
receipts []rlp.RawValue |
||||||
|
) |
||||||
|
reqCnt := len(req.Hashes) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) { |
||||||
|
go func() { |
||||||
|
for i, hash := range req.Hashes { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
if bytes >= softResponseLimit { |
||||||
|
break |
||||||
|
} |
||||||
|
// Retrieve the requested block's receipts, skipping if unknown to us
|
||||||
|
results := h.blockchain.GetReceiptsByHash(hash) |
||||||
|
if results == nil { |
||||||
|
if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
} |
||||||
|
// If known, encode and queue for response packet
|
||||||
|
if encoded, err := rlp.EncodeToBytes(results); err != nil { |
||||||
|
log.Error("Failed to encode receipt", "err", err) |
||||||
|
} else { |
||||||
|
receipts = append(receipts, encoded) |
||||||
|
bytes += len(encoded) |
||||||
|
} |
||||||
|
} |
||||||
|
reply := p.ReplyReceiptsRLP(req.ReqID, receipts) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutReceiptPacketsMeter.Mark(1) |
||||||
|
miscOutReceiptTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetProofsV2Msg: |
||||||
|
p.Log().Trace("Received les/2 proofs request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInTrieProofPacketsMeter.Mark(1) |
||||||
|
miscInTrieProofTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Reqs []ProofReq |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
// Gather state data until the fetch or network limits is reached
|
||||||
|
var ( |
||||||
|
lastBHash common.Hash |
||||||
|
root common.Hash |
||||||
|
) |
||||||
|
reqCnt := len(req.Reqs) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) { |
||||||
|
go func() { |
||||||
|
nodes := light.NewNodeSet() |
||||||
|
|
||||||
|
for i, request := range req.Reqs { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
// Look up the root hash belonging to the request
|
||||||
|
var ( |
||||||
|
number *uint64 |
||||||
|
header *types.Header |
||||||
|
trie state.Trie |
||||||
|
) |
||||||
|
if request.BHash != lastBHash { |
||||||
|
root, lastBHash = common.Hash{}, request.BHash |
||||||
|
|
||||||
|
if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil { |
||||||
|
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", request.BHash) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Refuse to search stale state data in the database since looking for
|
||||||
|
// a non-exist key is kind of expensive.
|
||||||
|
local := h.blockchain.CurrentHeader().Number.Uint64() |
||||||
|
if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { |
||||||
|
p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
root = header.Root |
||||||
|
} |
||||||
|
// If a header lookup failed (non existent), ignore subsequent requests for the same header
|
||||||
|
if root == (common.Hash{}) { |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Open the account or storage trie for the request
|
||||||
|
statedb := h.blockchain.StateCache() |
||||||
|
|
||||||
|
switch len(request.AccKey) { |
||||||
|
case 0: |
||||||
|
// No account key specified, open an account trie
|
||||||
|
trie, err = statedb.OpenTrie(root) |
||||||
|
if trie == nil || err != nil { |
||||||
|
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err) |
||||||
|
continue |
||||||
|
} |
||||||
|
default: |
||||||
|
// Account key specified, open a storage trie
|
||||||
|
account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey)) |
||||||
|
if err != nil { |
||||||
|
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) |
||||||
|
atomic.AddUint32(&p.invalidCount, 1) |
||||||
|
continue |
||||||
|
} |
||||||
|
trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root) |
||||||
|
if trie == nil || err != nil { |
||||||
|
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err) |
||||||
|
continue |
||||||
|
} |
||||||
|
} |
||||||
|
// Prove the user's request from the account or stroage trie
|
||||||
|
if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil { |
||||||
|
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err) |
||||||
|
continue |
||||||
|
} |
||||||
|
if nodes.DataSize() >= softResponseLimit { |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
reply := p.ReplyProofsV2(req.ReqID, nodes.NodeList()) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutTrieProofPacketsMeter.Mark(1) |
||||||
|
miscOutTrieProofTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetHelperTrieProofsMsg: |
||||||
|
p.Log().Trace("Received helper trie proof request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInHelperTriePacketsMeter.Mark(1) |
||||||
|
miscInHelperTrieTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Reqs []HelperTrieReq |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
// Gather state data until the fetch or network limits is reached
|
||||||
|
var ( |
||||||
|
auxBytes int |
||||||
|
auxData [][]byte |
||||||
|
) |
||||||
|
reqCnt := len(req.Reqs) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) { |
||||||
|
go func() { |
||||||
|
var ( |
||||||
|
lastIdx uint64 |
||||||
|
lastType uint |
||||||
|
root common.Hash |
||||||
|
auxTrie *trie.Trie |
||||||
|
) |
||||||
|
nodes := light.NewNodeSet() |
||||||
|
for i, request := range req.Reqs { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx { |
||||||
|
auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx |
||||||
|
|
||||||
|
var prefix string |
||||||
|
if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) { |
||||||
|
auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix))) |
||||||
|
} |
||||||
|
} |
||||||
|
if request.AuxReq == auxRoot { |
||||||
|
var data []byte |
||||||
|
if root != (common.Hash{}) { |
||||||
|
data = root[:] |
||||||
|
} |
||||||
|
auxData = append(auxData, data) |
||||||
|
auxBytes += len(data) |
||||||
|
} else { |
||||||
|
if auxTrie != nil { |
||||||
|
auxTrie.Prove(request.Key, request.FromLevel, nodes) |
||||||
|
} |
||||||
|
if request.AuxReq != 0 { |
||||||
|
data := h.getAuxiliaryHeaders(request) |
||||||
|
auxData = append(auxData, data) |
||||||
|
auxBytes += len(data) |
||||||
|
} |
||||||
|
} |
||||||
|
if nodes.DataSize()+auxBytes >= softResponseLimit { |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
reply := p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutHelperTriePacketsMeter.Mark(1) |
||||||
|
miscOutHelperTrieTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case SendTxV2Msg: |
||||||
|
p.Log().Trace("Received new transactions") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInTxsPacketsMeter.Mark(1) |
||||||
|
miscInTxsTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Txs []*types.Transaction |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
reqCnt := len(req.Txs) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxTxSend) { |
||||||
|
go func() { |
||||||
|
stats := make([]light.TxStatus, len(req.Txs)) |
||||||
|
for i, tx := range req.Txs { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
return |
||||||
|
} |
||||||
|
hash := tx.Hash() |
||||||
|
stats[i] = h.txStatus(hash) |
||||||
|
if stats[i].Status == core.TxStatusUnknown { |
||||||
|
addFn := h.txpool.AddRemotes |
||||||
|
// Add txs synchronously for testing purpose
|
||||||
|
if h.addTxsSync { |
||||||
|
addFn = h.txpool.AddRemotesSync |
||||||
|
} |
||||||
|
if errs := addFn([]*types.Transaction{tx}); errs[0] != nil { |
||||||
|
stats[i].Error = errs[0].Error() |
||||||
|
continue |
||||||
|
} |
||||||
|
stats[i] = h.txStatus(hash) |
||||||
|
} |
||||||
|
} |
||||||
|
reply := p.ReplyTxStatus(req.ReqID, stats) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutTxsPacketsMeter.Mark(1) |
||||||
|
miscOutTxsTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
case GetTxStatusMsg: |
||||||
|
p.Log().Trace("Received transaction status query request") |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscInTxStatusPacketsMeter.Mark(1) |
||||||
|
miscInTxStatusTrafficMeter.Mark(int64(msg.Size)) |
||||||
|
} |
||||||
|
var req struct { |
||||||
|
ReqID uint64 |
||||||
|
Hashes []common.Hash |
||||||
|
} |
||||||
|
if err := msg.Decode(&req); err != nil { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrDecode, "msg %v: %v", msg, err) |
||||||
|
} |
||||||
|
reqCnt := len(req.Hashes) |
||||||
|
if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) { |
||||||
|
go func() { |
||||||
|
stats := make([]light.TxStatus, len(req.Hashes)) |
||||||
|
for i, hash := range req.Hashes { |
||||||
|
if i != 0 && !task.waitOrStop() { |
||||||
|
sendResponse(req.ReqID, 0, nil, task.servingTime) |
||||||
|
return |
||||||
|
} |
||||||
|
stats[i] = h.txStatus(hash) |
||||||
|
} |
||||||
|
reply := p.ReplyTxStatus(req.ReqID, stats) |
||||||
|
sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) |
||||||
|
if metrics.EnabledExpensive { |
||||||
|
miscOutTxStatusPacketsMeter.Mark(1) |
||||||
|
miscOutTxStatusTrafficMeter.Mark(int64(reply.size())) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
default: |
||||||
|
p.Log().Trace("Received invalid message", "code", msg.Code) |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errResp(ErrInvalidMsgCode, "%v", msg.Code) |
||||||
|
} |
||||||
|
// If the client has made too much invalid request(e.g. request a non-exist data),
|
||||||
|
// reject them to prevent SPAM attack.
|
||||||
|
if atomic.LoadUint32(&p.invalidCount) > maxRequestErrors { |
||||||
|
clientErrorMeter.Mark(1) |
||||||
|
return errTooManyInvalidRequest |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// getAccount retrieves an account from the state based on root.
|
||||||
|
func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) { |
||||||
|
trie, err := trie.New(root, triedb) |
||||||
|
if err != nil { |
||||||
|
return state.Account{}, err |
||||||
|
} |
||||||
|
blob, err := trie.TryGet(hash[:]) |
||||||
|
if err != nil { |
||||||
|
return state.Account{}, err |
||||||
|
} |
||||||
|
var account state.Account |
||||||
|
if err = rlp.DecodeBytes(blob, &account); err != nil { |
||||||
|
return state.Account{}, err |
||||||
|
} |
||||||
|
return account, nil |
||||||
|
} |
||||||
|
|
||||||
|
// getHelperTrie returns the post-processed trie root for the given trie ID and section index
|
||||||
|
func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) { |
||||||
|
switch typ { |
||||||
|
case htCanonical: |
||||||
|
sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1) |
||||||
|
return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix |
||||||
|
case htBloomBits: |
||||||
|
sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1) |
||||||
|
return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix |
||||||
|
} |
||||||
|
return common.Hash{}, "" |
||||||
|
} |
||||||
|
|
||||||
|
// getAuxiliaryHeaders returns requested auxiliary headers for the CHT request.
|
||||||
|
func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte { |
||||||
|
if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 { |
||||||
|
blockNum := binary.BigEndian.Uint64(req.Key) |
||||||
|
hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum) |
||||||
|
return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum) |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// txStatus returns the status of a specified transaction.
|
||||||
|
func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus { |
||||||
|
var stat light.TxStatus |
||||||
|
// Looking the transaction in txpool first.
|
||||||
|
stat.Status = h.txpool.Status([]common.Hash{hash})[0] |
||||||
|
|
||||||
|
// If the transaction is unknown to the pool, try looking it up locally.
|
||||||
|
if stat.Status == core.TxStatusUnknown { |
||||||
|
lookup := h.blockchain.GetTransactionLookup(hash) |
||||||
|
if lookup != nil { |
||||||
|
stat.Status = core.TxStatusIncluded |
||||||
|
stat.Lookup = lookup |
||||||
|
} |
||||||
|
} |
||||||
|
return stat |
||||||
|
} |
||||||
|
|
||||||
|
// broadcastHeaders broadcasts new block information to all connected light
// clients. According to the agreement between client and server, server should
// only broadcast new announcement if the total difficulty is higher than the
// last one. Besides server will add the signature if client requires.
func (h *serverHandler) broadcastHeaders() {
	defer h.wg.Done()

	// Subscribe to chain-head events; the subscription is torn down when
	// this goroutine exits (closeCh closed).
	headCh := make(chan core.ChainHeadEvent, 10)
	headSub := h.blockchain.SubscribeChainHeadEvent(headCh)
	defer headSub.Unsubscribe()

	var (
		lastHead *types.Header // last header actually announced, nil before the first announce
		lastTd   = common.Big0 // total difficulty of the last announcement
	)
	for {
		select {
		case ev := <-headCh:
			peers := h.server.peers.AllPeers()
			if len(peers) == 0 {
				// Nobody to announce to; drop the event.
				continue
			}
			header := ev.Block.Header()
			hash, number := header.Hash(), header.Number.Uint64()
			td := h.blockchain.GetTd(hash, number)
			if td == nil || td.Cmp(lastTd) <= 0 {
				// Protocol rule: only announce heads with strictly higher
				// total difficulty than the previous announcement.
				continue
			}
			// Depth of the reorg relative to the previously announced head.
			// NOTE(review): rawdb.FindCommonAncestor's result is dereferenced
			// without a nil check — confirm it cannot return nil for two
			// headers both present in this chain database.
			var reorg uint64
			if lastHead != nil {
				reorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()
			}
			lastHead, lastTd = header, td

			log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg)
			var (
				signed         bool         // whether signedAnnounce has been populated yet
				signedAnnounce announceData // signed copy, produced lazily at most once per head
			)
			announce := announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}
			for _, p := range peers {
				p := p // capture per-iteration peer for the queued closure
				switch p.announceType {
				case announceTypeSimple:
					p.queueSend(func() { p.SendAnnounce(announce) })
				case announceTypeSigned:
					// Sign once and reuse the same signed announcement for
					// every peer that requires signatures.
					if !signed {
						signedAnnounce = announce
						signedAnnounce.sign(h.server.privateKey)
						signed = true
					}
					p.queueSend(func() { p.SendAnnounce(signedAnnounce) })
				}
			}
		case <-h.closeCh:
			// Server shutting down.
			return
		}
	}
}
Loading…
Reference in new issue