mirror of https://github.com/ethereum/go-ethereum
Merge pull request #23561 from karalabe/temp-split-leseh
les: duplicate downloader and fetcher to allow progressive refactoring (ref: pull/23565/head)
commit 9ada4a2e2c
@@ -0,0 +1,166 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"context"
	"sync"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/rpc"
)

// PublicDownloaderAPI provides an API which gives information about the current synchronisation status.
// It offers only methods that operate on data that can be available to anyone without security risks.
type PublicDownloaderAPI struct {
	d                         *Downloader
	mux                       *event.TypeMux
	installSyncSubscription   chan chan interface{}
	uninstallSyncSubscription chan *uninstallSyncSubscriptionRequest
}

// NewPublicDownloaderAPI creates a new PublicDownloaderAPI. The API has an internal event loop that
// listens for events from the downloader through the global event mux. In case it receives one of
// these events it broadcasts it to all syncing subscriptions that are installed through the
// installSyncSubscription channel.
func NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI {
	api := &PublicDownloaderAPI{
		d:                         d,
		mux:                       m,
		installSyncSubscription:   make(chan chan interface{}),
		uninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest),
	}

	go api.eventLoop()

	return api
}

// eventLoop runs a loop until the event mux closes. It will install and uninstall new
// sync subscriptions and broadcasts sync status updates to the installed sync subscriptions.
func (api *PublicDownloaderAPI) eventLoop() {
	var (
		sub               = api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
		syncSubscriptions = make(map[chan interface{}]struct{})
	)

	for {
		select {
		case i := <-api.installSyncSubscription:
			syncSubscriptions[i] = struct{}{}
		case u := <-api.uninstallSyncSubscription:
			delete(syncSubscriptions, u.c)
			close(u.uninstalled)
		case event := <-sub.Chan():
			if event == nil {
				return
			}

			var notification interface{}
			switch event.Data.(type) {
			case StartEvent:
				notification = &SyncingResult{
					Syncing: true,
					Status:  api.d.Progress(),
				}
			case DoneEvent, FailedEvent:
				notification = false
			}
			// broadcast
			for c := range syncSubscriptions {
				c <- notification
			}
		}
	}
}

// Syncing provides information when this node starts synchronising with the Ethereum network and when it's finished.
func (api *PublicDownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported
	}

	rpcSub := notifier.CreateSubscription()

	go func() {
		statuses := make(chan interface{})
		sub := api.SubscribeSyncStatus(statuses)

		for {
			select {
			case status := <-statuses:
				notifier.Notify(rpcSub.ID, status)
			case <-rpcSub.Err():
				sub.Unsubscribe()
				return
			case <-notifier.Closed():
				sub.Unsubscribe()
				return
			}
		}
	}()

	return rpcSub, nil
}

// SyncingResult provides information about the current synchronisation status for this node.
type SyncingResult struct {
	Syncing bool                  `json:"syncing"`
	Status  ethereum.SyncProgress `json:"status"`
}

// uninstallSyncSubscriptionRequest uninstalls a syncing subscription in the API event loop.
type uninstallSyncSubscriptionRequest struct {
	c           chan interface{}
	uninstalled chan interface{}
}

// SyncStatusSubscription represents a syncing subscription.
type SyncStatusSubscription struct {
	api       *PublicDownloaderAPI // register subscription in event loop of this api instance
	c         chan interface{}     // channel where events are broadcasted to
	unsubOnce sync.Once            // make sure unsubscribe logic is executed once
}

// Unsubscribe uninstalls the subscription from the PublicDownloaderAPI event loop.
// The status channel that was passed to SubscribeSyncStatus isn't used anymore
// after this method returns.
func (s *SyncStatusSubscription) Unsubscribe() {
	s.unsubOnce.Do(func() {
		req := uninstallSyncSubscriptionRequest{s.c, make(chan interface{})}
		s.api.uninstallSyncSubscription <- &req

		for {
			select {
			case <-s.c:
				// drop new status events until uninstall confirmation
				continue
			case <-req.uninstalled:
				return
			}
		}
	})
}

// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.
// The given channel must receive interface values; the result can either be a
// *SyncingResult (while syncing is in progress) or false (when syncing is done or has failed).
func (api *PublicDownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {
	api.installSyncSubscription <- status
	return &SyncStatusSubscription{api: api, c: status}
}
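
A minimal sketch (same package; watchSync is a hypothetical helper, not part of the diff) of how the subscription machinery above is consumed. Per eventLoop, subscribers receive a *SyncingResult on StartEvent and false on DoneEvent/FailedEvent.

func watchSync(api *PublicDownloaderAPI) {
	status := make(chan interface{})
	sub := api.SubscribeSyncStatus(status)
	defer sub.Unsubscribe()

	for msg := range status {
		switch v := msg.(type) {
		case *SyncingResult:
			_ = v.Status // progress counters while the sync is running
		case bool:
			return // false: syncing finished or failed
		}
	}
}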
(Two file diffs suppressed because they are too large.)
@@ -0,0 +1,25 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import "github.com/ethereum/go-ethereum/core/types"

type DoneEvent struct {
	Latest *types.Header
}
type StartEvent struct{}
type FailedEvent struct{ Err error }
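
A minimal sketch, assuming only the event.TypeMux API already used by the downloader API file above, of how these event types travel from the downloader to the API event loop (demoMuxFlow is a hypothetical name, not part of the diff):

func demoMuxFlow(mux *event.TypeMux) {
	sub := mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})
	defer sub.Unsubscribe()

	go mux.Post(StartEvent{}) // the downloader posts this when a sync begins

	if ev := <-sub.Chan(); ev != nil {
		if _, ok := ev.Data.(StartEvent); ok {
			// the API event loop reacts here by broadcasting a *SyncingResult
		}
	}
}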
@@ -0,0 +1,45 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the metrics collected by the downloader.

package downloader

import (
	"github.com/ethereum/go-ethereum/metrics"
)

var (
	headerInMeter      = metrics.NewRegisteredMeter("eth/downloader/headers/in", nil)
	headerReqTimer     = metrics.NewRegisteredTimer("eth/downloader/headers/req", nil)
	headerDropMeter    = metrics.NewRegisteredMeter("eth/downloader/headers/drop", nil)
	headerTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/headers/timeout", nil)

	bodyInMeter      = metrics.NewRegisteredMeter("eth/downloader/bodies/in", nil)
	bodyReqTimer     = metrics.NewRegisteredTimer("eth/downloader/bodies/req", nil)
	bodyDropMeter    = metrics.NewRegisteredMeter("eth/downloader/bodies/drop", nil)
	bodyTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/bodies/timeout", nil)

	receiptInMeter      = metrics.NewRegisteredMeter("eth/downloader/receipts/in", nil)
	receiptReqTimer     = metrics.NewRegisteredTimer("eth/downloader/receipts/req", nil)
	receiptDropMeter    = metrics.NewRegisteredMeter("eth/downloader/receipts/drop", nil)
	receiptTimeoutMeter = metrics.NewRegisteredMeter("eth/downloader/receipts/timeout", nil)

	stateInMeter   = metrics.NewRegisteredMeter("eth/downloader/states/in", nil)
	stateDropMeter = metrics.NewRegisteredMeter("eth/downloader/states/drop", nil)

	throttleCounter = metrics.NewRegisteredCounter("eth/downloader/throttle", nil)
)
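
A small sketch of how these meters are driven elsewhere in the package: delivery paths mark the number of items received, and any metrics reporter can read the totals back. recordHeaderDelivery is a hypothetical helper, not part of the diff.

func recordHeaderDelivery(delivered int64) {
	headerInMeter.Mark(delivered) // count headers arriving from peers
	_ = headerInMeter.Count()     // accumulated total, readable by metrics reporters
}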
@@ -0,0 +1,81 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import "fmt"

// SyncMode represents the synchronisation mode of the downloader.
// It is a uint32 as it is used with atomic operations.
type SyncMode uint32

const (
	FullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks
	FastSync                  // Quickly download the headers, full sync only at the chain head
	SnapSync                  // Download the chain and the state via compact snapshots
	LightSync                 // Download only the headers and terminate afterwards
)

func (mode SyncMode) IsValid() bool {
	return mode >= FullSync && mode <= LightSync
}

// String implements the stringer interface.
func (mode SyncMode) String() string {
	switch mode {
	case FullSync:
		return "full"
	case FastSync:
		return "fast"
	case SnapSync:
		return "snap"
	case LightSync:
		return "light"
	default:
		return "unknown"
	}
}

func (mode SyncMode) MarshalText() ([]byte, error) {
	switch mode {
	case FullSync:
		return []byte("full"), nil
	case FastSync:
		return []byte("fast"), nil
	case SnapSync:
		return []byte("snap"), nil
	case LightSync:
		return []byte("light"), nil
	default:
		return nil, fmt.Errorf("unknown sync mode %d", mode)
	}
}

func (mode *SyncMode) UnmarshalText(text []byte) error {
	switch string(text) {
	case "full":
		*mode = FullSync
	case "fast":
		*mode = FastSync
	case "snap":
		*mode = SnapSync
	case "light":
		*mode = LightSync
	default:
		return fmt.Errorf(`unknown sync mode %q, want "full", "fast" or "light"`, text)
	}
	return nil
}
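
A short round-trip sketch (same package; ExampleSyncMode_text is a hypothetical test-style function, not part of the diff) showing how the text marshalling above lets SyncMode appear as plain strings in config files:

func ExampleSyncMode_text() {
	b, _ := FastSync.MarshalText()

	var mode SyncMode
	_ = mode.UnmarshalText([]byte("snap"))

	fmt.Println(string(b), mode, mode.IsValid())
	// Output: fast snap true
}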
@@ -0,0 +1,501 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the active peer-set of the downloader, maintaining both failures
// as well as reputation metrics to prioritize the block retrievals.

package downloader

import (
	"errors"
	"math/big"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/msgrate"
)

const (
	maxLackingHashes = 4096 // Maximum number of entries allowed on the list of lacking items
)

var (
	errAlreadyFetching   = errors.New("already fetching blocks from peer")
	errAlreadyRegistered = errors.New("peer is already registered")
	errNotRegistered     = errors.New("peer is not registered")
)

// peerConnection represents an active peer from which hashes and blocks are retrieved.
type peerConnection struct {
	id string // Unique identifier of the peer

	headerIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)
	blockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)
	receiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)
	stateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)

	headerStarted  time.Time // Time instance when the last header fetch was started
	blockStarted   time.Time // Time instance when the last block (body) fetch was started
	receiptStarted time.Time // Time instance when the last receipt fetch was started
	stateStarted   time.Time // Time instance when the last node data fetch was started

	rates   *msgrate.Tracker         // Tracker to hone in on the number of items retrievable per second
	lacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)

	peer Peer

	version uint       // Eth protocol version number to switch strategies
	log     log.Logger // Contextual logger to add extra infos to peer logs
	lock    sync.RWMutex
}

// LightPeer encapsulates the methods required to synchronise with a remote light peer.
type LightPeer interface {
	Head() (common.Hash, *big.Int)
	RequestHeadersByHash(common.Hash, int, int, bool) error
	RequestHeadersByNumber(uint64, int, int, bool) error
}

// Peer encapsulates the methods required to synchronise with a remote full peer.
type Peer interface {
	LightPeer
	RequestBodies([]common.Hash) error
	RequestReceipts([]common.Hash) error
	RequestNodeData([]common.Hash) error
}

// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.
type lightPeerWrapper struct {
	peer LightPeer
}

func (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }
func (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByHash(h, amount, skip, reverse)
}
func (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {
	return w.peer.RequestHeadersByNumber(i, amount, skip, reverse)
}
func (w *lightPeerWrapper) RequestBodies([]common.Hash) error {
	panic("RequestBodies not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {
	panic("RequestReceipts not supported in light client mode sync")
}
func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {
	panic("RequestNodeData not supported in light client mode sync")
}

// newPeerConnection creates a new downloader peer.
func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {
	return &peerConnection{
		id:      id,
		lacking: make(map[common.Hash]struct{}),
		peer:    peer,
		version: version,
		log:     logger,
	}
}

// Reset clears the internal state of a peer entity.
func (p *peerConnection) Reset() {
	p.lock.Lock()
	defer p.lock.Unlock()

	atomic.StoreInt32(&p.headerIdle, 0)
	atomic.StoreInt32(&p.blockIdle, 0)
	atomic.StoreInt32(&p.receiptIdle, 0)
	atomic.StoreInt32(&p.stateIdle, 0)

	p.lacking = make(map[common.Hash]struct{})
}

// FetchHeaders sends a header retrieval request to the remote peer.
func (p *peerConnection) FetchHeaders(from uint64, count int) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.headerStarted = time.Now()

	// Issue the header retrieval request (absolute upwards without gaps)
	go p.peer.RequestHeadersByNumber(from, count, 0, false)

	return nil
}

// FetchBodies sends a block body retrieval request to the remote peer.
func (p *peerConnection) FetchBodies(request *fetchRequest) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.blockStarted = time.Now()

	go func() {
		// Convert the header set to a retrievable slice
		hashes := make([]common.Hash, 0, len(request.Headers))
		for _, header := range request.Headers {
			hashes = append(hashes, header.Hash())
		}
		p.peer.RequestBodies(hashes)
	}()

	return nil
}

// FetchReceipts sends a receipt retrieval request to the remote peer.
func (p *peerConnection) FetchReceipts(request *fetchRequest) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.receiptStarted = time.Now()

	go func() {
		// Convert the header set to a retrievable slice
		hashes := make([]common.Hash, 0, len(request.Headers))
		for _, header := range request.Headers {
			hashes = append(hashes, header.Hash())
		}
		p.peer.RequestReceipts(hashes)
	}()

	return nil
}

// FetchNodeData sends a node state data retrieval request to the remote peer.
func (p *peerConnection) FetchNodeData(hashes []common.Hash) error {
	// Short circuit if the peer is already fetching
	if !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {
		return errAlreadyFetching
	}
	p.stateStarted = time.Now()

	go p.peer.RequestNodeData(hashes)

	return nil
}

// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval
// requests. Its estimated header retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.BlockHeadersMsg, deliveryTime.Sub(p.headerStarted), delivered)
	atomic.StoreInt32(&p.headerIdle, 0)
}

// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval
// requests. Its estimated body retrieval throughput is updated with that measured
// just now.
func (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.BlockBodiesMsg, deliveryTime.Sub(p.blockStarted), delivered)
	atomic.StoreInt32(&p.blockIdle, 0)
}

// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt
// retrieval requests. Its estimated receipt retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.ReceiptsMsg, deliveryTime.Sub(p.receiptStarted), delivered)
	atomic.StoreInt32(&p.receiptIdle, 0)
}

// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie
// data retrieval requests. Its estimated state retrieval throughput is updated
// with that measured just now.
func (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {
	p.rates.Update(eth.NodeDataMsg, deliveryTime.Sub(p.stateStarted), delivered)
	atomic.StoreInt32(&p.stateIdle, 0)
}

// HeaderCapacity retrieves the peer's header download allowance based on its
// previously discovered throughput.
func (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.BlockHeadersMsg, targetRTT)
	if cap > MaxHeaderFetch {
		cap = MaxHeaderFetch
	}
	return cap
}

// BlockCapacity retrieves the peer's block download allowance based on its
// previously discovered throughput.
func (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.BlockBodiesMsg, targetRTT)
	if cap > MaxBlockFetch {
		cap = MaxBlockFetch
	}
	return cap
}

// ReceiptCapacity retrieves the peer's receipt download allowance based on its
// previously discovered throughput.
func (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.ReceiptsMsg, targetRTT)
	if cap > MaxReceiptFetch {
		cap = MaxReceiptFetch
	}
	return cap
}

// NodeDataCapacity retrieves the peer's state download allowance based on its
// previously discovered throughput.
func (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {
	cap := p.rates.Capacity(eth.NodeDataMsg, targetRTT)
	if cap > MaxStateFetch {
		cap = MaxStateFetch
	}
	return cap
}

// MarkLacking appends a new entity to the set of items (blocks, receipts, states)
// that a peer is known not to have (i.e. have been requested before). If the
// set reaches its maximum allowed capacity, items are randomly dropped off.
func (p *peerConnection) MarkLacking(hash common.Hash) {
	p.lock.Lock()
	defer p.lock.Unlock()

	for len(p.lacking) >= maxLackingHashes {
		for drop := range p.lacking {
			delete(p.lacking, drop)
			break
		}
	}
	p.lacking[hash] = struct{}{}
}

// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking
// list (i.e. whether we know that the peer does not have it).
func (p *peerConnection) Lacks(hash common.Hash) bool {
	p.lock.RLock()
	defer p.lock.RUnlock()

	_, ok := p.lacking[hash]
	return ok
}

// peerSet represents the collection of active peers participating in the chain
// download procedure.
type peerSet struct {
	peers map[string]*peerConnection
	rates *msgrate.Trackers // Set of rate trackers to give the sync a common beat

	newPeerFeed  event.Feed
	peerDropFeed event.Feed

	lock sync.RWMutex
}

// newPeerSet creates a new peer set to track the active download sources.
func newPeerSet() *peerSet {
	return &peerSet{
		peers: make(map[string]*peerConnection),
		rates: msgrate.NewTrackers(log.New("proto", "eth")),
	}
}

// SubscribeNewPeers subscribes to peer arrival events.
func (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {
	return ps.newPeerFeed.Subscribe(ch)
}

// SubscribePeerDrops subscribes to peer departure events.
func (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {
	return ps.peerDropFeed.Subscribe(ch)
}

// Reset iterates over the current peer set, and resets each of the known peers
// to prepare for a next batch of block retrieval.
func (ps *peerSet) Reset() {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	for _, peer := range ps.peers {
		peer.Reset()
	}
}

// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
//
// The method also sets the starting throughput values of the new peer to the
// average of all existing peers, to give it a realistic chance of being used
// for data retrievals.
func (ps *peerSet) Register(p *peerConnection) error {
	// Register the new peer with some meaningful defaults
	ps.lock.Lock()
	if _, ok := ps.peers[p.id]; ok {
		ps.lock.Unlock()
		return errAlreadyRegistered
	}
	p.rates = msgrate.NewTracker(ps.rates.MeanCapacities(), ps.rates.MedianRoundTrip())
	if err := ps.rates.Track(p.id, p.rates); err != nil {
		return err
	}
	ps.peers[p.id] = p
	ps.lock.Unlock()

	ps.newPeerFeed.Send(p)
	return nil
}

// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity.
func (ps *peerSet) Unregister(id string) error {
	ps.lock.Lock()
	p, ok := ps.peers[id]
	if !ok {
		ps.lock.Unlock()
		return errNotRegistered
	}
	delete(ps.peers, id)
	ps.rates.Untrack(id)
	ps.lock.Unlock()

	ps.peerDropFeed.Send(p)
	return nil
}

// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return ps.peers[id]
}

// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	return len(ps.peers)
}

// AllPeers retrieves a flat list of all the peers within the set.
func (ps *peerSet) AllPeers() []*peerConnection {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	list := make([]*peerConnection, 0, len(ps.peers))
	for _, p := range ps.peers {
		list = append(list, p)
	}
	return list
}

// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.headerIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.BlockHeadersMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}

// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
// the active peer set, ordered by their reputation.
func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.blockIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.BlockBodiesMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}

// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
// within the active peer set, ordered by their reputation.
func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.receiptIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.ReceiptsMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}

// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
// peers within the active peer set, ordered by their reputation.
func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
	idle := func(p *peerConnection) bool {
		return atomic.LoadInt32(&p.stateIdle) == 0
	}
	throughput := func(p *peerConnection) int {
		return p.rates.Capacity(eth.NodeDataMsg, time.Second)
	}
	return ps.idlePeers(eth.ETH66, eth.ETH66, idle, throughput)
}

// idlePeers retrieves a flat list of all currently idle peers satisfying the
// protocol version constraints, using the provided function to check idleness.
// The resulting set of peers is sorted by their capacity.
func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, capacity func(*peerConnection) int) ([]*peerConnection, int) {
	ps.lock.RLock()
	defer ps.lock.RUnlock()

	var (
		total = 0
		idle  = make([]*peerConnection, 0, len(ps.peers))
		tps   = make([]int, 0, len(ps.peers))
	)
	for _, p := range ps.peers {
		if p.version >= minProtocol && p.version <= maxProtocol {
			if idleCheck(p) {
				idle = append(idle, p)
				tps = append(tps, capacity(p))
			}
			total++
		}
	}

	// And sort them
	sortPeers := &peerCapacitySort{idle, tps}
	sort.Sort(sortPeers)
	return sortPeers.p, total
}

// peerCapacitySort implements sort.Interface.
// It sorts peer connections by capacity (descending).
type peerCapacitySort struct {
	p  []*peerConnection
	tp []int
}

func (ps *peerCapacitySort) Len() int {
	return len(ps.p)
}

func (ps *peerCapacitySort) Less(i, j int) bool {
	return ps.tp[i] > ps.tp[j]
}

func (ps *peerCapacitySort) Swap(i, j int) {
	ps.p[i], ps.p[j] = ps.p[j], ps.p[i]
	ps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]
}
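
A brief sketch of the capacity ordering performed by idlePeers and peerCapacitySort above: peers with higher measured capacity sort first, so the largest requests go to the fastest peers. The two peers and their capacities here are hypothetical, and sortByCapacityDemo is not part of the diff.

func sortByCapacityDemo() {
	pFast := newPeerConnection("fast", eth.ETH66, nil, log.New())
	pSlow := newPeerConnection("slow", eth.ETH66, nil, log.New())

	peers := []*peerConnection{pSlow, pFast}
	caps := []int{64, 512} // assumed items-per-second estimates for pSlow, pFast

	sort.Sort(&peerCapacitySort{peers, caps})
	// peers[0] is now pFast, because Less orders by capacity descending
}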
@@ -0,0 +1,913 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Contains the block download scheduler to collect download tasks and schedule
// them in an ordered, and throttled way.

package downloader

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/prque"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/trie"
)

const (
	bodyType    = uint(0)
	receiptType = uint(1)
)

var (
	blockCacheMaxItems     = 8192              // Maximum number of blocks to cache before throttling the download
	blockCacheInitialItems = 2048              // Initial number of blocks to start fetching, before we know the sizes of the blocks
	blockCacheMemory       = 256 * 1024 * 1024 // Maximum amount of memory to use for block caching
	blockCacheSizeWeight   = 0.1               // Multiplier to approximate the average block size based on past ones
)

var (
	errNoFetchesPending = errors.New("no fetches pending")
	errStaleDelivery    = errors.New("stale delivery")
)

// fetchRequest is a currently running data retrieval operation.
type fetchRequest struct {
	Peer    *peerConnection // Peer to which the request was sent
	From    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)
	Headers []*types.Header // [eth/62] Requested headers, sorted by request order
	Time    time.Time       // Time when the request was made
}

// fetchResult is a struct collecting partial results from data fetchers until
// all outstanding pieces complete and the result as a whole can be processed.
type fetchResult struct {
	pending int32 // Flag telling what deliveries are outstanding

	Header       *types.Header
	Uncles       []*types.Header
	Transactions types.Transactions
	Receipts     types.Receipts
}

func newFetchResult(header *types.Header, fastSync bool) *fetchResult {
	item := &fetchResult{
		Header: header,
	}
	if !header.EmptyBody() {
		item.pending |= (1 << bodyType)
	}
	if fastSync && !header.EmptyReceipts() {
		item.pending |= (1 << receiptType)
	}
	return item
}

// SetBodyDone flags the body as finished.
func (f *fetchResult) SetBodyDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {
		atomic.AddInt32(&f.pending, -1)
	}
}

// AllDone checks if item is done.
func (f *fetchResult) AllDone() bool {
	return atomic.LoadInt32(&f.pending) == 0
}

// SetReceiptsDone flags the receipts as finished.
func (f *fetchResult) SetReceiptsDone() {
	if v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {
		atomic.AddInt32(&f.pending, -2)
	}
}

// Done checks if the given type is done already
func (f *fetchResult) Done(kind uint) bool {
	v := atomic.LoadInt32(&f.pending)
	return v&(1<<kind) == 0
}
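
A compact sketch of how the pending bitmask above evolves (traceFetchResult is a hypothetical helper, not part of the diff; the header passed in is assumed to have a non-empty body and non-empty receipts):

func traceFetchResult(header *types.Header) {
	r := newFetchResult(header, true) // fastSync=true schedules the receipts too

	_ = r.Done(bodyType)    // false: bit 0 still set
	_ = r.Done(receiptType) // false: bit 1 still set

	r.SetBodyDone()     // clears bit 0
	r.SetReceiptsDone() // clears bit 1

	_ = r.AllDone() // true: nothing outstanding any more
}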
// queue represents hashes that either need fetching or are being fetched
type queue struct {
	mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching

	// Headers are "special", they download in batches, supported by a skeleton chain
	headerHead      common.Hash                    // Hash of the last queued header to verify order
	headerTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers
	headerTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for
	headerPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable
	headerPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations
	headerResults   []*types.Header                // Result cache accumulating the completed headers
	headerProced    int                            // Number of headers already processed from the results
	headerOffset    uint64                         // Number of the first header in the result cache
	headerContCh    chan bool                      // Channel to notify when header download finishes

	// All data retrievals below are based on an already assembled header chain
	blockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers
	blockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for
	blockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations

	receiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers
	receiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for
	receiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations

	resultCache *resultStore       // Downloaded but not yet delivered fetch results
	resultSize  common.StorageSize // Approximate size of a block (exponential moving average)

	lock   *sync.RWMutex
	active *sync.Cond
	closed bool

	lastStatLog time.Time
}

// newQueue creates a new download queue for scheduling block retrieval.
func newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {
	lock := new(sync.RWMutex)
	q := &queue{
		headerContCh:     make(chan bool),
		blockTaskQueue:   prque.New(nil),
		receiptTaskQueue: prque.New(nil),
		active:           sync.NewCond(lock),
		lock:             lock,
	}
	q.Reset(blockCacheLimit, thresholdInitialSize)
	return q
}

// Reset clears out the queue contents.
func (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	q.closed = false
	q.mode = FullSync

	q.headerHead = common.Hash{}
	q.headerPendPool = make(map[string]*fetchRequest)

	q.blockTaskPool = make(map[common.Hash]*types.Header)
	q.blockTaskQueue.Reset()
	q.blockPendPool = make(map[string]*fetchRequest)

	q.receiptTaskPool = make(map[common.Hash]*types.Header)
	q.receiptTaskQueue.Reset()
	q.receiptPendPool = make(map[string]*fetchRequest)

	q.resultCache = newResultStore(blockCacheLimit)
	q.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))
}

// Close marks the end of the sync, unblocking Results.
// It may be called even if the queue is already closed.
func (q *queue) Close() {
	q.lock.Lock()
	q.closed = true
	q.active.Signal()
	q.lock.Unlock()
}

// PendingHeaders retrieves the number of header requests pending for retrieval.
func (q *queue) PendingHeaders() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.headerTaskQueue.Size()
}

// PendingBlocks retrieves the number of block (body) requests pending for retrieval.
func (q *queue) PendingBlocks() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.blockTaskQueue.Size()
}

// PendingReceipts retrieves the number of block receipts pending for retrieval.
func (q *queue) PendingReceipts() int {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.receiptTaskQueue.Size()
}

// InFlightHeaders retrieves whether there are header fetch requests currently
// in flight.
func (q *queue) InFlightHeaders() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.headerPendPool) > 0
}

// InFlightBlocks retrieves whether there are block fetch requests currently in
// flight.
func (q *queue) InFlightBlocks() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.blockPendPool) > 0
}

// InFlightReceipts retrieves whether there are receipt fetch requests currently
// in flight.
func (q *queue) InFlightReceipts() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	return len(q.receiptPendPool) > 0
}

// Idle returns if the queue is fully idle or has some data still inside.
func (q *queue) Idle() bool {
	q.lock.Lock()
	defer q.lock.Unlock()

	queued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()
	pending := len(q.blockPendPool) + len(q.receiptPendPool)

	return (queued + pending) == 0
}

// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill
// up an already retrieved header skeleton.
func (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)
	if q.headerResults != nil {
		panic("skeleton assembly already in progress")
	}
	// Schedule all the header retrieval tasks for the skeleton assembly
	q.headerTaskPool = make(map[uint64]*types.Header)
	q.headerTaskQueue = prque.New(nil)
	q.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains
	q.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)
	q.headerProced = 0
	q.headerOffset = from
	q.headerContCh = make(chan bool, 1)

	for i, header := range skeleton {
		index := from + uint64(i*MaxHeaderFetch)

		q.headerTaskPool[index] = header
		q.headerTaskQueue.Push(index, -int64(index))
	}
}
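
A tiny worked example of the batch indexing used by ScheduleSkeleton above, with hypothetical values: MaxHeaderFetch is defined elsewhere in this package, and 192 is only an assumed value here. skeletonIndexDemo is not part of the diff.

func skeletonIndexDemo() {
	from, batch := uint64(1), 192 // assume MaxHeaderFetch == 192
	for i := 0; i < 3; i++ {
		fmt.Println(from + uint64(i*batch)) // 1, 193, 385: first header of each skeleton batch
	}
}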
// RetrieveHeaders retrieves the header chain assembled based on the scheduled
// skeleton.
func (q *queue) RetrieveHeaders() ([]*types.Header, int) {
	q.lock.Lock()
	defer q.lock.Unlock()

	headers, proced := q.headerResults, q.headerProced
	q.headerResults, q.headerProced = nil, 0

	return headers, proced
}

// Schedule adds a set of headers for the download queue for scheduling, returning
// the new headers encountered.
func (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Insert all the headers prioritised by the contained block number
	inserts := make([]*types.Header, 0, len(headers))
	for _, header := range headers {
		// Make sure chain order is honoured and preserved throughout
		hash := header.Hash()
		if header.Number == nil || header.Number.Uint64() != from {
			log.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", from)
			break
		}
		if q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {
			log.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash)
			break
		}
		// Make sure no duplicate requests are executed
		// We cannot skip this, even if the block is empty, since this is
		// what triggers the fetchResult creation.
		if _, ok := q.blockTaskPool[hash]; ok {
			log.Warn("Header already scheduled for block fetch", "number", header.Number, "hash", hash)
		} else {
			q.blockTaskPool[hash] = header
			q.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))
		}
		// Queue for receipt retrieval
		if q.mode == FastSync && !header.EmptyReceipts() {
			if _, ok := q.receiptTaskPool[hash]; ok {
				log.Warn("Header already scheduled for receipt fetch", "number", header.Number, "hash", hash)
			} else {
				q.receiptTaskPool[hash] = header
				q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))
			}
		}
		inserts = append(inserts, header)
		q.headerHead = hash
		from++
	}
	return inserts
}

// Results retrieves and permanently removes a batch of fetch results from
// the cache. The result slice will be empty if the queue has been closed.
// Results can be called concurrently with Deliver and Schedule,
// but assumes that there are not two simultaneous callers to Results.
func (q *queue) Results(block bool) []*fetchResult {
	// Abort early if there are no items and non-blocking requested
	if !block && !q.resultCache.HasCompletedItems() {
		return nil
	}
	closed := false
	for !closed && !q.resultCache.HasCompletedItems() {
		// In order to wait on 'active', we need to obtain the lock.
		// That may take a while, if someone is delivering at the same
		// time, so after obtaining the lock, we check again if there
		// are any results to fetch.
		// Also, in-between we ask for the lock and the lock is obtained,
		// someone can have closed the queue. In that case, we should
		// return the available results and stop blocking
		q.lock.Lock()
		if q.resultCache.HasCompletedItems() || q.closed {
			q.lock.Unlock()
			break
		}
		// No items available, and not closed
		q.active.Wait()
		closed = q.closed
		q.lock.Unlock()
	}
	// Regardless if closed or not, we can still deliver whatever we have
	results := q.resultCache.GetCompleted(maxResultsProcess)
	for _, result := range results {
		// Recalculate the result item weights to prevent memory exhaustion
		size := result.Header.Size()
		for _, uncle := range result.Uncles {
			size += uncle.Size()
		}
		for _, receipt := range result.Receipts {
			size += receipt.Size()
		}
		for _, tx := range result.Transactions {
			size += tx.Size()
		}
		q.resultSize = common.StorageSize(blockCacheSizeWeight)*size +
			(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize
	}
	// Using the newly calibrated resultsize, figure out the new throttle limit
	// on the result cache
	throttleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)
	throttleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)

	// Log some info at certain times
	if time.Since(q.lastStatLog) > 60*time.Second {
		q.lastStatLog = time.Now()
		info := q.Stats()
		info = append(info, "throttle", throttleThreshold)
		log.Info("Downloader queue stats", info...)
	}
	return results
}

func (q *queue) Stats() []interface{} {
	q.lock.RLock()
	defer q.lock.RUnlock()

	return q.stats()
}

func (q *queue) stats() []interface{} {
	return []interface{}{
		"receiptTasks", q.receiptTaskQueue.Size(),
		"blockTasks", q.blockTaskQueue.Size(),
		"itemSize", q.resultSize,
	}
}

// ReserveHeaders reserves a set of headers for the given peer, skipping any
// previously failed batches.
func (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Short circuit if the peer's already downloading something (sanity check to
	// not corrupt state)
	if _, ok := q.headerPendPool[p.id]; ok {
		return nil
	}
	// Retrieve a batch of hashes, skipping previously failed ones
	send, skip := uint64(0), []uint64{}
	for send == 0 && !q.headerTaskQueue.Empty() {
		from, _ := q.headerTaskQueue.Pop()
		if q.headerPeerMiss[p.id] != nil {
			if _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {
				skip = append(skip, from.(uint64))
				continue
			}
		}
		send = from.(uint64)
	}
	// Merge all the skipped batches back
	for _, from := range skip {
		q.headerTaskQueue.Push(from, -int64(from))
	}
	// Assemble and return the block download request
	if send == 0 {
		return nil
	}
	request := &fetchRequest{
		Peer: p,
		From: send,
		Time: time.Now(),
	}
	q.headerPendPool[p.id] = request
	return request
}

// ReserveBodies reserves a set of body fetches for the given peer, skipping any
// previously failed downloads. Beside the next batch of needed fetches, it also
// returns a flag whether empty blocks were queued requiring processing.
func (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)
}

// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping
// any previously failed downloads. Beside the next batch of needed fetches, it
// also returns a flag whether empty receipts were queued requiring importing.
func (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {
	q.lock.Lock()
	defer q.lock.Unlock()

	return q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)
}

// reserveHeaders reserves a set of data download operations for a given peer,
// skipping any previously failed ones. This method is a generic version used
// by the individual special reservation functions.
//
// Note, this method expects the queue lock to be already held for writing. The
// reason the lock is not obtained in here is because the parameters already need
// to access the queue, so they already need a lock anyway.
//
// Returns:
//   item     - the fetchRequest
//   progress - whether any progress was made
//   throttle - if the caller should throttle for a while
func (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,
	pendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {
	// Short circuit if the pool has been depleted, or if the peer's already
	// downloading something (sanity check not to corrupt state)
	if taskQueue.Empty() {
		return nil, false, true
	}
	if _, ok := pendPool[p.id]; ok {
		return nil, false, false
	}
	// Retrieve a batch of tasks, skipping previously failed ones
	send := make([]*types.Header, 0, count)
	skip := make([]*types.Header, 0)
	progress := false
	throttled := false
	for proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {
		// the task queue will pop items in order, so the highest prio block
		// is also the lowest block number.
		h, _ := taskQueue.Peek()
		header := h.(*types.Header)
		// we can ask the resultcache if this header is within the
		// "prioritized" segment of blocks. If it is not, we need to throttle

		stale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)
		if stale {
			// Don't put back in the task queue, this item has already been
			// delivered upstream
			taskQueue.PopItem()
			progress = true
			delete(taskPool, header.Hash())
			proc = proc - 1
			log.Error("Fetch reservation already delivered", "number", header.Number.Uint64())
			continue
		}
		if throttle {
			// There are no resultslots available. Leave it in the task queue
			// However, if there are any left as 'skipped', we should not tell
			// the caller to throttle, since we still want some other
			// peer to fetch those for us
			throttled = len(skip) == 0
			break
		}
		if err != nil {
			// this most definitely should _not_ happen
			log.Warn("Failed to reserve headers", "err", err)
			// There are no resultslots available. Leave it in the task queue
			break
		}
		if item.Done(kind) {
			// If it's a noop, we can skip this task
			delete(taskPool, header.Hash())
			taskQueue.PopItem()
			proc = proc - 1
			progress = true
			continue
		}
		// Remove it from the task queue
		taskQueue.PopItem()
		// Otherwise unless the peer is known not to have the data, add to the retrieve list
		if p.Lacks(header.Hash()) {
			skip = append(skip, header)
		} else {
			send = append(send, header)
		}
	}
	// Merge all the skipped headers back
	for _, header := range skip {
		taskQueue.Push(header, -int64(header.Number.Uint64()))
	}
	if q.resultCache.HasCompletedItems() {
		// Wake Results, resultCache was modified
		q.active.Signal()
	}
	// Assemble and return the block download request
	if len(send) == 0 {
		return nil, progress, throttled
	}
	request := &fetchRequest{
		Peer:    p,
		Headers: send,
		Time:    time.Now(),
	}
	pendPool[p.id] = request
||||||
|
return request, progress, throttled |
||||||
|
} |
||||||
|
|
||||||
|
// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.
|
||||||
|
func (q *queue) CancelHeaders(request *fetchRequest) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
q.cancel(request, q.headerTaskQueue, q.headerPendPool) |
||||||
|
} |
||||||
|
|
||||||
|
// CancelBodies aborts a body fetch request, returning all pending headers to the
|
||||||
|
// task queue.
|
||||||
|
func (q *queue) CancelBodies(request *fetchRequest) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
q.cancel(request, q.blockTaskQueue, q.blockPendPool) |
||||||
|
} |
||||||
|
|
||||||
|
// CancelReceipts aborts a body fetch request, returning all pending headers to
|
||||||
|
// the task queue.
|
||||||
|
func (q *queue) CancelReceipts(request *fetchRequest) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
q.cancel(request, q.receiptTaskQueue, q.receiptPendPool) |
||||||
|
} |
||||||
|
|
||||||
|
// Cancel aborts a fetch request, returning all pending hashes to the task queue.
|
||||||
|
func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) { |
||||||
|
if request.From > 0 { |
||||||
|
taskQueue.Push(request.From, -int64(request.From)) |
||||||
|
} |
||||||
|
for _, header := range request.Headers { |
||||||
|
taskQueue.Push(header, -int64(header.Number.Uint64())) |
||||||
|
} |
||||||
|
delete(pendPool, request.Peer.id) |
||||||
|
} |
||||||
|
|
||||||
|
// Revoke cancels all pending requests belonging to a given peer. This method is
|
||||||
|
// meant to be called during a peer drop to quickly reassign owned data fetches
|
||||||
|
// to remaining nodes.
|
||||||
|
func (q *queue) Revoke(peerID string) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
if request, ok := q.blockPendPool[peerID]; ok { |
||||||
|
for _, header := range request.Headers { |
||||||
|
q.blockTaskQueue.Push(header, -int64(header.Number.Uint64())) |
||||||
|
} |
||||||
|
delete(q.blockPendPool, peerID) |
||||||
|
} |
||||||
|
if request, ok := q.receiptPendPool[peerID]; ok { |
||||||
|
for _, header := range request.Headers { |
||||||
|
q.receiptTaskQueue.Push(header, -int64(header.Number.Uint64())) |
||||||
|
} |
||||||
|
delete(q.receiptPendPool, peerID) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// ExpireHeaders checks for in flight requests that exceeded a timeout allowance,
|
||||||
|
// canceling them and returning the responsible peers for penalisation.
|
||||||
|
func (q *queue) ExpireHeaders(timeout time.Duration) map[string]int { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
return q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter) |
||||||
|
} |
||||||
|
|
||||||
|
// ExpireBodies checks for in flight block body requests that exceeded a timeout
|
||||||
|
// allowance, canceling them and returning the responsible peers for penalisation.
|
||||||
|
func (q *queue) ExpireBodies(timeout time.Duration) map[string]int { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
return q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter) |
||||||
|
} |
||||||
|
|
||||||
|
// ExpireReceipts checks for in flight receipt requests that exceeded a timeout
|
||||||
|
// allowance, canceling them and returning the responsible peers for penalisation.
|
||||||
|
func (q *queue) ExpireReceipts(timeout time.Duration) map[string]int { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
return q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter) |
||||||
|
} |
||||||
|
|
||||||
|
// expire is the generic check that move expired tasks from a pending pool back
|
||||||
|
// into a task pool, returning all entities caught with expired tasks.
|
||||||
|
//
|
||||||
|
// Note, this method expects the queue lock to be already held. The
|
||||||
|
// reason the lock is not obtained in here is because the parameters already need
|
||||||
|
// to access the queue, so they already need a lock anyway.
|
||||||
|
func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int { |
||||||
|
// Iterate over the expired requests and return each to the queue
|
||||||
|
expiries := make(map[string]int) |
||||||
|
for id, request := range pendPool { |
||||||
|
if time.Since(request.Time) > timeout { |
||||||
|
// Update the metrics with the timeout
|
||||||
|
timeoutMeter.Mark(1) |
||||||
|
|
||||||
|
// Return any non satisfied requests to the pool
|
||||||
|
if request.From > 0 { |
||||||
|
taskQueue.Push(request.From, -int64(request.From)) |
||||||
|
} |
||||||
|
for _, header := range request.Headers { |
||||||
|
taskQueue.Push(header, -int64(header.Number.Uint64())) |
||||||
|
} |
||||||
|
// Add the peer to the expiry report along the number of failed requests
|
||||||
|
expiries[id] = len(request.Headers) |
||||||
|
|
||||||
|
// Remove the expired requests from the pending pool directly
|
||||||
|
delete(pendPool, id) |
||||||
|
} |
||||||
|
} |
||||||
|
return expiries |
||||||
|
} |
||||||
|
|
||||||
|
// DeliverHeaders injects a header retrieval response into the header results
|
||||||
|
// cache. This method either accepts all headers it received, or none of them
|
||||||
|
// if they do not map correctly to the skeleton.
|
||||||
|
//
|
||||||
|
// If the headers are accepted, the method makes an attempt to deliver the set
|
||||||
|
// of ready headers to the processor to keep the pipeline full. However it will
|
||||||
|
// not block to prevent stalling other pending deliveries.
|
||||||
|
func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
var logger log.Logger |
||||||
|
if len(id) < 16 { |
||||||
|
// Tests use short IDs, don't choke on them
|
||||||
|
logger = log.New("peer", id) |
||||||
|
} else { |
||||||
|
logger = log.New("peer", id[:16]) |
||||||
|
} |
||||||
|
// Short circuit if the data was never requested
|
||||||
|
request := q.headerPendPool[id] |
||||||
|
if request == nil { |
||||||
|
return 0, errNoFetchesPending |
||||||
|
} |
||||||
|
headerReqTimer.UpdateSince(request.Time) |
||||||
|
delete(q.headerPendPool, id) |
||||||
|
|
||||||
|
// Ensure headers can be mapped onto the skeleton chain
|
||||||
|
target := q.headerTaskPool[request.From].Hash() |
||||||
|
|
||||||
|
accepted := len(headers) == MaxHeaderFetch |
||||||
|
if accepted { |
||||||
|
if headers[0].Number.Uint64() != request.From { |
||||||
|
logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From) |
||||||
|
accepted = false |
||||||
|
} else if headers[len(headers)-1].Hash() != target { |
||||||
|
logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target) |
||||||
|
accepted = false |
||||||
|
} |
||||||
|
} |
||||||
|
if accepted { |
||||||
|
parentHash := headers[0].Hash() |
||||||
|
for i, header := range headers[1:] { |
||||||
|
hash := header.Hash() |
||||||
|
if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { |
||||||
|
logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want) |
||||||
|
accepted = false |
||||||
|
break |
||||||
|
} |
||||||
|
if parentHash != header.ParentHash { |
||||||
|
logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash) |
||||||
|
accepted = false |
||||||
|
break |
||||||
|
} |
||||||
|
// Set-up parent hash for next round
|
||||||
|
parentHash = hash |
||||||
|
} |
||||||
|
} |
||||||
|
// If the batch of headers wasn't accepted, mark as unavailable
|
||||||
|
if !accepted { |
||||||
|
logger.Trace("Skeleton filling not accepted", "from", request.From) |
||||||
|
|
||||||
|
miss := q.headerPeerMiss[id] |
||||||
|
if miss == nil { |
||||||
|
q.headerPeerMiss[id] = make(map[uint64]struct{}) |
||||||
|
miss = q.headerPeerMiss[id] |
||||||
|
} |
||||||
|
miss[request.From] = struct{}{} |
||||||
|
|
||||||
|
q.headerTaskQueue.Push(request.From, -int64(request.From)) |
||||||
|
return 0, errors.New("delivery not accepted") |
||||||
|
} |
||||||
|
// Clean up a successful fetch and try to deliver any sub-results
|
||||||
|
copy(q.headerResults[request.From-q.headerOffset:], headers) |
||||||
|
delete(q.headerTaskPool, request.From) |
||||||
|
|
||||||
|
ready := 0 |
||||||
|
for q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil { |
||||||
|
ready += MaxHeaderFetch |
||||||
|
} |
||||||
|
if ready > 0 { |
||||||
|
// Headers are ready for delivery, gather them and push forward (non blocking)
|
||||||
|
process := make([]*types.Header, ready) |
||||||
|
copy(process, q.headerResults[q.headerProced:q.headerProced+ready]) |
||||||
|
|
||||||
|
select { |
||||||
|
case headerProcCh <- process: |
||||||
|
logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number) |
||||||
|
q.headerProced += len(process) |
||||||
|
default: |
||||||
|
} |
||||||
|
} |
||||||
|
// Check for termination and return
|
||||||
|
if len(q.headerTaskPool) == 0 { |
||||||
|
q.headerContCh <- false |
||||||
|
} |
||||||
|
return len(headers), nil |
||||||
|
} |
||||||
|
|
||||||
|
// DeliverBodies injects a block body retrieval response into the results queue.
|
||||||
|
// The method returns the number of blocks bodies accepted from the delivery and
|
||||||
|
// also wakes any threads waiting for data delivery.
|
||||||
|
func (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
trieHasher := trie.NewStackTrie(nil) |
||||||
|
validate := func(index int, header *types.Header) error { |
||||||
|
if types.DeriveSha(types.Transactions(txLists[index]), trieHasher) != header.TxHash { |
||||||
|
return errInvalidBody |
||||||
|
} |
||||||
|
if types.CalcUncleHash(uncleLists[index]) != header.UncleHash { |
||||||
|
return errInvalidBody |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
reconstruct := func(index int, result *fetchResult) { |
||||||
|
result.Transactions = txLists[index] |
||||||
|
result.Uncles = uncleLists[index] |
||||||
|
result.SetBodyDone() |
||||||
|
} |
||||||
|
return q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, |
||||||
|
bodyReqTimer, len(txLists), validate, reconstruct) |
||||||
|
} |
||||||
|
|
||||||
|
// DeliverReceipts injects a receipt retrieval response into the results queue.
|
||||||
|
// The method returns the number of transaction receipts accepted from the delivery
|
||||||
|
// and also wakes any threads waiting for data delivery.
|
||||||
|
func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
trieHasher := trie.NewStackTrie(nil) |
||||||
|
validate := func(index int, header *types.Header) error { |
||||||
|
if types.DeriveSha(types.Receipts(receiptList[index]), trieHasher) != header.ReceiptHash { |
||||||
|
return errInvalidReceipt |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
reconstruct := func(index int, result *fetchResult) { |
||||||
|
result.Receipts = receiptList[index] |
||||||
|
result.SetReceiptsDone() |
||||||
|
} |
||||||
|
return q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, |
||||||
|
receiptReqTimer, len(receiptList), validate, reconstruct) |
||||||
|
} |
||||||
|
|
||||||
|
// deliver injects a data retrieval response into the results queue.
|
||||||
|
//
|
||||||
|
// Note, this method expects the queue lock to be already held for writing. The
|
||||||
|
// reason this lock is not obtained in here is because the parameters already need
|
||||||
|
// to access the queue, so they already need a lock anyway.
|
||||||
|
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, |
||||||
|
taskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer, |
||||||
|
results int, validate func(index int, header *types.Header) error, |
||||||
|
reconstruct func(index int, result *fetchResult)) (int, error) { |
||||||
|
|
||||||
|
// Short circuit if the data was never requested
|
||||||
|
request := pendPool[id] |
||||||
|
if request == nil { |
||||||
|
return 0, errNoFetchesPending |
||||||
|
} |
||||||
|
reqTimer.UpdateSince(request.Time) |
||||||
|
delete(pendPool, id) |
||||||
|
|
||||||
|
// If no data items were retrieved, mark them as unavailable for the origin peer
|
||||||
|
if results == 0 { |
||||||
|
for _, header := range request.Headers { |
||||||
|
request.Peer.MarkLacking(header.Hash()) |
||||||
|
} |
||||||
|
} |
||||||
|
// Assemble each of the results with their headers and retrieved data parts
|
||||||
|
var ( |
||||||
|
accepted int |
||||||
|
failure error |
||||||
|
i int |
||||||
|
hashes []common.Hash |
||||||
|
) |
||||||
|
for _, header := range request.Headers { |
||||||
|
// Short circuit assembly if no more fetch results are found
|
||||||
|
if i >= results { |
||||||
|
break |
||||||
|
} |
||||||
|
// Validate the fields
|
||||||
|
if err := validate(i, header); err != nil { |
||||||
|
failure = err |
||||||
|
break |
||||||
|
} |
||||||
|
hashes = append(hashes, header.Hash()) |
||||||
|
i++ |
||||||
|
} |
||||||
|
|
||||||
|
for _, header := range request.Headers[:i] { |
||||||
|
if res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil { |
||||||
|
reconstruct(accepted, res) |
||||||
|
} else { |
||||||
|
// else: betweeen here and above, some other peer filled this result,
|
||||||
|
// or it was indeed a no-op. This should not happen, but if it does it's
|
||||||
|
// not something to panic about
|
||||||
|
log.Error("Delivery stale", "stale", stale, "number", header.Number.Uint64(), "err", err) |
||||||
|
failure = errStaleDelivery |
||||||
|
} |
||||||
|
// Clean up a successful fetch
|
||||||
|
delete(taskPool, hashes[accepted]) |
||||||
|
accepted++ |
||||||
|
} |
||||||
|
// Return all failed or missing fetches to the queue
|
||||||
|
for _, header := range request.Headers[accepted:] { |
||||||
|
taskQueue.Push(header, -int64(header.Number.Uint64())) |
||||||
|
} |
||||||
|
// Wake up Results
|
||||||
|
if accepted > 0 { |
||||||
|
q.active.Signal() |
||||||
|
} |
||||||
|
if failure == nil { |
||||||
|
return accepted, nil |
||||||
|
} |
||||||
|
// If none of the data was good, it's a stale delivery
|
||||||
|
if accepted > 0 { |
||||||
|
return accepted, fmt.Errorf("partial failure: %v", failure) |
||||||
|
} |
||||||
|
return accepted, fmt.Errorf("%w: %v", failure, errStaleDelivery) |
||||||
|
} |
||||||
|
|
||||||
|
// Prepare configures the result cache to allow accepting and caching inbound
|
||||||
|
// fetch results.
|
||||||
|
func (q *queue) Prepare(offset uint64, mode SyncMode) { |
||||||
|
q.lock.Lock() |
||||||
|
defer q.lock.Unlock() |
||||||
|
|
||||||
|
// Prepare the queue for sync results
|
||||||
|
q.resultCache.Prepare(offset) |
||||||
|
q.mode = mode |
||||||
|
} |
@ -0,0 +1,452 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package downloader |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math/big" |
||||||
|
"math/rand" |
||||||
|
"sync" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||||
|
"github.com/ethereum/go-ethereum/core" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/params" |
||||||
|
) |
||||||
|
|
||||||
|
var ( |
||||||
|
testdb = rawdb.NewMemoryDatabase() |
||||||
|
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000)) |
||||||
|
) |
||||||
|
|
||||||
|
// makeChain creates a chain of n blocks starting at and including parent.
|
||||||
|
// the returned hash chain is ordered head->parent. In addition, every 3rd block
|
||||||
|
// contains a transaction and every 5th an uncle to allow testing correct block
|
||||||
|
// reassembly.
|
||||||
|
func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) { |
||||||
|
blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) { |
||||||
|
block.SetCoinbase(common.Address{seed}) |
||||||
|
// Add one tx to every secondblock
|
||||||
|
if !empty && i%2 == 0 { |
||||||
|
signer := types.MakeSigner(params.TestChainConfig, block.Number()) |
||||||
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
block.AddTx(tx) |
||||||
|
} |
||||||
|
}) |
||||||
|
return blocks, receipts |
||||||
|
} |
||||||
|
|
||||||
|
type chainData struct { |
||||||
|
blocks []*types.Block |
||||||
|
offset int |
||||||
|
} |
||||||
|
|
||||||
|
var chain *chainData |
||||||
|
var emptyChain *chainData |
||||||
|
|
||||||
|
func init() { |
||||||
|
// Create a chain of blocks to import
|
||||||
|
targetBlocks := 128 |
||||||
|
blocks, _ := makeChain(targetBlocks, 0, genesis, false) |
||||||
|
chain = &chainData{blocks, 0} |
||||||
|
|
||||||
|
blocks, _ = makeChain(targetBlocks, 0, genesis, true) |
||||||
|
emptyChain = &chainData{blocks, 0} |
||||||
|
} |
||||||
|
|
||||||
|
func (chain *chainData) headers() []*types.Header { |
||||||
|
hdrs := make([]*types.Header, len(chain.blocks)) |
||||||
|
for i, b := range chain.blocks { |
||||||
|
hdrs[i] = b.Header() |
||||||
|
} |
||||||
|
return hdrs |
||||||
|
} |
||||||
|
|
||||||
|
func (chain *chainData) Len() int { |
||||||
|
return len(chain.blocks) |
||||||
|
} |
||||||
|
|
||||||
|
func dummyPeer(id string) *peerConnection { |
||||||
|
p := &peerConnection{ |
||||||
|
id: id, |
||||||
|
lacking: make(map[common.Hash]struct{}), |
||||||
|
} |
||||||
|
return p |
||||||
|
} |
||||||
|
|
||||||
|
func TestBasics(t *testing.T) { |
||||||
|
numOfBlocks := len(emptyChain.blocks) |
||||||
|
numOfReceipts := len(emptyChain.blocks) / 2 |
||||||
|
|
||||||
|
q := newQueue(10, 10) |
||||||
|
if !q.Idle() { |
||||||
|
t.Errorf("new queue should be idle") |
||||||
|
} |
||||||
|
q.Prepare(1, FastSync) |
||||||
|
if res := q.Results(false); len(res) != 0 { |
||||||
|
t.Fatal("new queue should have 0 results") |
||||||
|
} |
||||||
|
|
||||||
|
// Schedule a batch of headers
|
||||||
|
q.Schedule(chain.headers(), 1) |
||||||
|
if q.Idle() { |
||||||
|
t.Errorf("queue should not be idle") |
||||||
|
} |
||||||
|
if got, exp := q.PendingBlocks(), chain.Len(); got != exp { |
||||||
|
t.Errorf("wrong pending block count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
// Only non-empty receipts get added to task-queue
|
||||||
|
if got, exp := q.PendingReceipts(), 64; got != exp { |
||||||
|
t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
// Items are now queued for downloading, next step is that we tell the
|
||||||
|
// queue that a certain peer will deliver them for us
|
||||||
|
{ |
||||||
|
peer := dummyPeer("peer-1") |
||||||
|
fetchReq, _, throttle := q.ReserveBodies(peer, 50) |
||||||
|
if !throttle { |
||||||
|
// queue size is only 10, so throttling should occur
|
||||||
|
t.Fatal("should throttle") |
||||||
|
} |
||||||
|
// But we should still get the first things to fetch
|
||||||
|
if got, exp := len(fetchReq.Headers), 5; got != exp { |
||||||
|
t.Fatalf("expected %d requests, got %d", exp, got) |
||||||
|
} |
||||||
|
if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { |
||||||
|
t.Fatalf("expected header %d, got %d", exp, got) |
||||||
|
} |
||||||
|
} |
||||||
|
if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { |
||||||
|
t.Errorf("expected block task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { |
||||||
|
t.Errorf("expected receipt task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
{ |
||||||
|
peer := dummyPeer("peer-2") |
||||||
|
fetchReq, _, throttle := q.ReserveBodies(peer, 50) |
||||||
|
|
||||||
|
// The second peer should hit throttling
|
||||||
|
if !throttle { |
||||||
|
t.Fatalf("should not throttle") |
||||||
|
} |
||||||
|
// And not get any fetches at all, since it was throttled to begin with
|
||||||
|
if fetchReq != nil { |
||||||
|
t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers)) |
||||||
|
} |
||||||
|
} |
||||||
|
if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { |
||||||
|
t.Errorf("expected block task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { |
||||||
|
t.Errorf("expected receipt task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
{ |
||||||
|
// The receipt delivering peer should not be affected
|
||||||
|
// by the throttling of body deliveries
|
||||||
|
peer := dummyPeer("peer-3") |
||||||
|
fetchReq, _, throttle := q.ReserveReceipts(peer, 50) |
||||||
|
if !throttle { |
||||||
|
// queue size is only 10, so throttling should occur
|
||||||
|
t.Fatal("should throttle") |
||||||
|
} |
||||||
|
// But we should still get the first things to fetch
|
||||||
|
if got, exp := len(fetchReq.Headers), 5; got != exp { |
||||||
|
t.Fatalf("expected %d requests, got %d", exp, got) |
||||||
|
} |
||||||
|
if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { |
||||||
|
t.Fatalf("expected header %d, got %d", exp, got) |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { |
||||||
|
t.Errorf("expected block task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got { |
||||||
|
t.Errorf("expected receipt task queue to be %d, got %d", exp, got) |
||||||
|
} |
||||||
|
if got, exp := q.resultCache.countCompleted(), 0; got != exp { |
||||||
|
t.Errorf("wrong processable count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func TestEmptyBlocks(t *testing.T) { |
||||||
|
numOfBlocks := len(emptyChain.blocks) |
||||||
|
|
||||||
|
q := newQueue(10, 10) |
||||||
|
|
||||||
|
q.Prepare(1, FastSync) |
||||||
|
// Schedule a batch of headers
|
||||||
|
q.Schedule(emptyChain.headers(), 1) |
||||||
|
if q.Idle() { |
||||||
|
t.Errorf("queue should not be idle") |
||||||
|
} |
||||||
|
if got, exp := q.PendingBlocks(), len(emptyChain.blocks); got != exp { |
||||||
|
t.Errorf("wrong pending block count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
if got, exp := q.PendingReceipts(), 0; got != exp { |
||||||
|
t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
// They won't be processable, because the fetchresults haven't been
|
||||||
|
// created yet
|
||||||
|
if got, exp := q.resultCache.countCompleted(), 0; got != exp { |
||||||
|
t.Errorf("wrong processable count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
|
||||||
|
// Items are now queued for downloading, next step is that we tell the
|
||||||
|
// queue that a certain peer will deliver them for us
|
||||||
|
// That should trigger all of them to suddenly become 'done'
|
||||||
|
{ |
||||||
|
// Reserve blocks
|
||||||
|
peer := dummyPeer("peer-1") |
||||||
|
fetchReq, _, _ := q.ReserveBodies(peer, 50) |
||||||
|
|
||||||
|
// there should be nothing to fetch, blocks are empty
|
||||||
|
if fetchReq != nil { |
||||||
|
t.Fatal("there should be no body fetch tasks remaining") |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
if q.blockTaskQueue.Size() != numOfBlocks-10 { |
||||||
|
t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) |
||||||
|
} |
||||||
|
if q.receiptTaskQueue.Size() != 0 { |
||||||
|
t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) |
||||||
|
} |
||||||
|
{ |
||||||
|
peer := dummyPeer("peer-3") |
||||||
|
fetchReq, _, _ := q.ReserveReceipts(peer, 50) |
||||||
|
|
||||||
|
// there should be nothing to fetch, blocks are empty
|
||||||
|
if fetchReq != nil { |
||||||
|
t.Fatal("there should be no body fetch tasks remaining") |
||||||
|
} |
||||||
|
} |
||||||
|
if q.blockTaskQueue.Size() != numOfBlocks-10 { |
||||||
|
t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) |
||||||
|
} |
||||||
|
if q.receiptTaskQueue.Size() != 0 { |
||||||
|
t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) |
||||||
|
} |
||||||
|
if got, exp := q.resultCache.countCompleted(), 10; got != exp { |
||||||
|
t.Errorf("wrong processable count, got %d, exp %d", got, exp) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// XTestDelivery does some more extensive testing of events that happen,
|
||||||
|
// blocks that become known and peers that make reservations and deliveries.
|
||||||
|
// disabled since it's not really a unit-test, but can be executed to test
|
||||||
|
// some more advanced scenarios
|
||||||
|
func XTestDelivery(t *testing.T) { |
||||||
|
// the outside network, holding blocks
|
||||||
|
blo, rec := makeChain(128, 0, genesis, false) |
||||||
|
world := newNetwork() |
||||||
|
world.receipts = rec |
||||||
|
world.chain = blo |
||||||
|
world.progress(10) |
||||||
|
if false { |
||||||
|
log.Root().SetHandler(log.StdoutHandler) |
||||||
|
|
||||||
|
} |
||||||
|
q := newQueue(10, 10) |
||||||
|
var wg sync.WaitGroup |
||||||
|
q.Prepare(1, FastSync) |
||||||
|
wg.Add(1) |
||||||
|
go func() { |
||||||
|
// deliver headers
|
||||||
|
defer wg.Done() |
||||||
|
c := 1 |
||||||
|
for { |
||||||
|
//fmt.Printf("getting headers from %d\n", c)
|
||||||
|
hdrs := world.headers(c) |
||||||
|
l := len(hdrs) |
||||||
|
//fmt.Printf("scheduling %d headers, first %d last %d\n",
|
||||||
|
// l, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())
|
||||||
|
q.Schedule(hdrs, uint64(c)) |
||||||
|
c += l |
||||||
|
} |
||||||
|
}() |
||||||
|
wg.Add(1) |
||||||
|
go func() { |
||||||
|
// collect results
|
||||||
|
defer wg.Done() |
||||||
|
tot := 0 |
||||||
|
for { |
||||||
|
res := q.Results(true) |
||||||
|
tot += len(res) |
||||||
|
fmt.Printf("got %d results, %d tot\n", len(res), tot) |
||||||
|
// Now we can forget about these
|
||||||
|
world.forget(res[len(res)-1].Header.Number.Uint64()) |
||||||
|
|
||||||
|
} |
||||||
|
}() |
||||||
|
wg.Add(1) |
||||||
|
go func() { |
||||||
|
defer wg.Done() |
||||||
|
// reserve body fetch
|
||||||
|
i := 4 |
||||||
|
for { |
||||||
|
peer := dummyPeer(fmt.Sprintf("peer-%d", i)) |
||||||
|
f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) |
||||||
|
if f != nil { |
||||||
|
var emptyList []*types.Header |
||||||
|
var txs [][]*types.Transaction |
||||||
|
var uncles [][]*types.Header |
||||||
|
numToSkip := rand.Intn(len(f.Headers)) |
||||||
|
for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { |
||||||
|
txs = append(txs, world.getTransactions(hdr.Number.Uint64())) |
||||||
|
uncles = append(uncles, emptyList) |
||||||
|
} |
||||||
|
time.Sleep(100 * time.Millisecond) |
||||||
|
_, err := q.DeliverBodies(peer.id, txs, uncles) |
||||||
|
if err != nil { |
||||||
|
fmt.Printf("delivered %d bodies %v\n", len(txs), err) |
||||||
|
} |
||||||
|
} else { |
||||||
|
i++ |
||||||
|
time.Sleep(200 * time.Millisecond) |
||||||
|
} |
||||||
|
} |
||||||
|
}() |
||||||
|
go func() { |
||||||
|
defer wg.Done() |
||||||
|
// reserve receiptfetch
|
||||||
|
peer := dummyPeer("peer-3") |
||||||
|
for { |
||||||
|
f, _, _ := q.ReserveReceipts(peer, rand.Intn(50)) |
||||||
|
if f != nil { |
||||||
|
var rcs [][]*types.Receipt |
||||||
|
for _, hdr := range f.Headers { |
||||||
|
rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) |
||||||
|
} |
||||||
|
_, err := q.DeliverReceipts(peer.id, rcs) |
||||||
|
if err != nil { |
||||||
|
fmt.Printf("delivered %d receipts %v\n", len(rcs), err) |
||||||
|
} |
||||||
|
time.Sleep(100 * time.Millisecond) |
||||||
|
} else { |
||||||
|
time.Sleep(200 * time.Millisecond) |
||||||
|
} |
||||||
|
} |
||||||
|
}() |
||||||
|
wg.Add(1) |
||||||
|
go func() { |
||||||
|
defer wg.Done() |
||||||
|
for i := 0; i < 50; i++ { |
||||||
|
time.Sleep(300 * time.Millisecond) |
||||||
|
//world.tick()
|
||||||
|
//fmt.Printf("trying to progress\n")
|
||||||
|
world.progress(rand.Intn(100)) |
||||||
|
} |
||||||
|
for i := 0; i < 50; i++ { |
||||||
|
time.Sleep(2990 * time.Millisecond) |
||||||
|
|
||||||
|
} |
||||||
|
}() |
||||||
|
wg.Add(1) |
||||||
|
go func() { |
||||||
|
defer wg.Done() |
||||||
|
for { |
||||||
|
time.Sleep(990 * time.Millisecond) |
||||||
|
fmt.Printf("world block tip is %d\n", |
||||||
|
world.chain[len(world.chain)-1].Header().Number.Uint64()) |
||||||
|
fmt.Println(q.Stats()) |
||||||
|
} |
||||||
|
}() |
||||||
|
wg.Wait() |
||||||
|
} |
||||||
|
|
||||||
|
func newNetwork() *network { |
||||||
|
var l sync.RWMutex |
||||||
|
return &network{ |
||||||
|
cond: sync.NewCond(&l), |
||||||
|
offset: 1, // block 1 is at blocks[0]
|
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// represents the network
|
||||||
|
type network struct { |
||||||
|
offset int |
||||||
|
chain []*types.Block |
||||||
|
receipts []types.Receipts |
||||||
|
lock sync.RWMutex |
||||||
|
cond *sync.Cond |
||||||
|
} |
||||||
|
|
||||||
|
func (n *network) getTransactions(blocknum uint64) types.Transactions { |
||||||
|
index := blocknum - uint64(n.offset) |
||||||
|
return n.chain[index].Transactions() |
||||||
|
} |
||||||
|
func (n *network) getReceipts(blocknum uint64) types.Receipts { |
||||||
|
index := blocknum - uint64(n.offset) |
||||||
|
if got := n.chain[index].Header().Number.Uint64(); got != blocknum { |
||||||
|
fmt.Printf("Err, got %d exp %d\n", got, blocknum) |
||||||
|
panic("sd") |
||||||
|
} |
||||||
|
return n.receipts[index] |
||||||
|
} |
||||||
|
|
||||||
|
func (n *network) forget(blocknum uint64) { |
||||||
|
index := blocknum - uint64(n.offset) |
||||||
|
n.chain = n.chain[index:] |
||||||
|
n.receipts = n.receipts[index:] |
||||||
|
n.offset = int(blocknum) |
||||||
|
|
||||||
|
} |
||||||
|
func (n *network) progress(numBlocks int) { |
||||||
|
|
||||||
|
n.lock.Lock() |
||||||
|
defer n.lock.Unlock() |
||||||
|
//fmt.Printf("progressing...\n")
|
||||||
|
newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false) |
||||||
|
n.chain = append(n.chain, newBlocks...) |
||||||
|
n.receipts = append(n.receipts, newR...) |
||||||
|
n.cond.Broadcast() |
||||||
|
|
||||||
|
} |
||||||
|
|
||||||
|
func (n *network) headers(from int) []*types.Header { |
||||||
|
numHeaders := 128 |
||||||
|
var hdrs []*types.Header |
||||||
|
index := from - n.offset |
||||||
|
|
||||||
|
for index >= len(n.chain) { |
||||||
|
// wait for progress
|
||||||
|
n.cond.L.Lock() |
||||||
|
//fmt.Printf("header going into wait\n")
|
||||||
|
n.cond.Wait() |
||||||
|
index = from - n.offset |
||||||
|
n.cond.L.Unlock() |
||||||
|
} |
||||||
|
n.lock.RLock() |
||||||
|
defer n.lock.RUnlock() |
||||||
|
for i, b := range n.chain[index:] { |
||||||
|
hdrs = append(hdrs, b.Header()) |
||||||
|
if i >= numHeaders { |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
return hdrs |
||||||
|
} |
@ -0,0 +1,194 @@ |
|||||||
|
// Copyright 2019 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package downloader |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"sync" |
||||||
|
"sync/atomic" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
) |
||||||
|
|
||||||
|
// resultStore implements a structure for maintaining fetchResults, tracking their
|
||||||
|
// download-progress and delivering (finished) results.
|
||||||
|
type resultStore struct { |
||||||
|
items []*fetchResult // Downloaded but not yet delivered fetch results
|
||||||
|
resultOffset uint64 // Offset of the first cached fetch result in the block chain
|
||||||
|
|
||||||
|
// Internal index of first non-completed entry, updated atomically when needed.
|
||||||
|
// If all items are complete, this will equal length(items), so
|
||||||
|
// *important* : is not safe to use for indexing without checking against length
|
||||||
|
indexIncomplete int32 // atomic access
|
||||||
|
|
||||||
|
// throttleThreshold is the limit up to which we _want_ to fill the
|
||||||
|
// results. If blocks are large, we want to limit the results to less
|
||||||
|
// than the number of available slots, and maybe only fill 1024 out of
|
||||||
|
// 8192 possible places. The queue will, at certain times, recalibrate
|
||||||
|
// this index.
|
||||||
|
throttleThreshold uint64 |
||||||
|
|
||||||
|
lock sync.RWMutex |
||||||
|
} |
||||||
|
|
||||||
|
func newResultStore(size int) *resultStore { |
||||||
|
return &resultStore{ |
||||||
|
resultOffset: 0, |
||||||
|
items: make([]*fetchResult, size), |
||||||
|
throttleThreshold: uint64(size), |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// SetThrottleThreshold updates the throttling threshold based on the requested
|
||||||
|
// limit and the total queue capacity. It returns the (possibly capped) threshold
|
||||||
|
func (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 { |
||||||
|
r.lock.Lock() |
||||||
|
defer r.lock.Unlock() |
||||||
|
|
||||||
|
limit := uint64(len(r.items)) |
||||||
|
if threshold >= limit { |
||||||
|
threshold = limit |
||||||
|
} |
||||||
|
r.throttleThreshold = threshold |
||||||
|
return r.throttleThreshold |
||||||
|
} |
||||||
|
|
||||||
|
// AddFetch adds a header for body/receipt fetching. This is used when the queue
|
||||||
|
// wants to reserve headers for fetching.
|
||||||
|
//
|
||||||
|
// It returns the following:
|
||||||
|
// stale - if true, this item is already passed, and should not be requested again
|
||||||
|
// throttled - if true, the store is at capacity, this particular header is not prio now
|
||||||
|
// item - the result to store data into
|
||||||
|
// err - any error that occurred
|
||||||
|
func (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) { |
||||||
|
r.lock.Lock() |
||||||
|
defer r.lock.Unlock() |
||||||
|
|
||||||
|
var index int |
||||||
|
item, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64()) |
||||||
|
if err != nil || stale || throttled { |
||||||
|
return stale, throttled, item, err |
||||||
|
} |
||||||
|
if item == nil { |
||||||
|
item = newFetchResult(header, fastSync) |
||||||
|
r.items[index] = item |
||||||
|
} |
||||||
|
return stale, throttled, item, err |
||||||
|
} |
||||||
|
|
||||||
|
// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag
|
||||||
|
// is true, that means the header has already been delivered 'upstream'. This method
|
||||||
|
// does not bubble up the 'throttle' flag, since it's moot at the point in time when
|
||||||
|
// the item is downloaded and ready for delivery
|
||||||
|
func (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) { |
||||||
|
r.lock.RLock() |
||||||
|
defer r.lock.RUnlock() |
||||||
|
|
||||||
|
res, _, stale, _, err := r.getFetchResult(headerNumber) |
||||||
|
return res, stale, err |
||||||
|
} |
||||||
|
|
||||||
|
// getFetchResult returns the fetchResult corresponding to the given item, and
|
||||||
|
// the index where the result is stored.
|
||||||
|
func (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) { |
||||||
|
index = int(int64(headerNumber) - int64(r.resultOffset)) |
||||||
|
throttle = index >= int(r.throttleThreshold) |
||||||
|
stale = index < 0 |
||||||
|
|
||||||
|
if index >= len(r.items) { |
||||||
|
err = fmt.Errorf("%w: index allocation went beyond available resultStore space "+ |
||||||
|
"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d", errInvalidChain, |
||||||
|
index, headerNumber, r.resultOffset, len(r.items)) |
||||||
|
return nil, index, stale, throttle, err |
||||||
|
} |
||||||
|
if stale { |
||||||
|
return nil, index, stale, throttle, nil |
||||||
|
} |
||||||
|
item = r.items[index] |
||||||
|
return item, index, stale, throttle, nil |
||||||
|
} |
||||||
|
|
||||||
|
// hasCompletedItems returns true if there are processable items available
|
||||||
|
// this method is cheaper than countCompleted
|
||||||
|
func (r *resultStore) HasCompletedItems() bool { |
||||||
|
r.lock.RLock() |
||||||
|
defer r.lock.RUnlock() |
||||||
|
|
||||||
|
if len(r.items) == 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
if item := r.items[0]; item != nil && item.AllDone() { |
||||||
|
return true |
||||||
|
} |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// countCompleted returns the number of items ready for delivery, stopping at
|
||||||
|
// the first non-complete item.
|
||||||
|
//
|
||||||
|
// The mthod assumes (at least) rlock is held.
|
||||||
|
func (r *resultStore) countCompleted() int { |
||||||
|
// We iterate from the already known complete point, and see
|
||||||
|
// if any more has completed since last count
|
||||||
|
index := atomic.LoadInt32(&r.indexIncomplete) |
||||||
|
for ; ; index++ { |
||||||
|
if index >= int32(len(r.items)) { |
||||||
|
break |
||||||
|
} |
||||||
|
result := r.items[index] |
||||||
|
if result == nil || !result.AllDone() { |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
atomic.StoreInt32(&r.indexIncomplete, index) |
||||||
|
return int(index) |
||||||
|
} |
||||||
|
|
||||||
|
// GetCompleted returns the next batch of completed fetchResults
|
||||||
|
func (r *resultStore) GetCompleted(limit int) []*fetchResult { |
||||||
|
r.lock.Lock() |
||||||
|
defer r.lock.Unlock() |
||||||
|
|
||||||
|
completed := r.countCompleted() |
||||||
|
if limit > completed { |
||||||
|
limit = completed |
||||||
|
} |
||||||
|
results := make([]*fetchResult, limit) |
||||||
|
copy(results, r.items[:limit]) |
||||||
|
|
||||||
|
// Delete the results from the cache and clear the tail.
|
||||||
|
copy(r.items, r.items[limit:]) |
||||||
|
for i := len(r.items) - limit; i < len(r.items); i++ { |
||||||
|
r.items[i] = nil |
||||||
|
} |
||||||
|
// Advance the expected block number of the first cache entry
|
||||||
|
r.resultOffset += uint64(limit) |
||||||
|
atomic.AddInt32(&r.indexIncomplete, int32(-limit)) |
||||||
|
|
||||||
|
return results |
||||||
|
} |
||||||
|
|
||||||
|
// Prepare initialises the offset with the given block number
|
||||||
|
func (r *resultStore) Prepare(offset uint64) { |
||||||
|
r.lock.Lock() |
||||||
|
defer r.lock.Unlock() |
||||||
|
|
||||||
|
if r.resultOffset < offset { |
||||||
|
r.resultOffset = offset |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,615 @@ |
|||||||
|
// Copyright 2017 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package downloader |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"sync" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/core/state" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
"golang.org/x/crypto/sha3" |
||||||
|
) |
||||||
|
|
||||||
|
// stateReq represents a batch of state fetch requests grouped together into
|
||||||
|
// a single data retrieval network packet.
|
||||||
|
type stateReq struct { |
||||||
|
nItems uint16 // Number of items requested for download (max is 384, so uint16 is sufficient)
|
||||||
|
trieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts
|
||||||
|
codeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts
|
||||||
|
timeout time.Duration // Maximum round trip time for this to complete
|
||||||
|
timer *time.Timer // Timer to fire when the RTT timeout expires
|
||||||
|
peer *peerConnection // Peer that we're requesting from
|
||||||
|
delivered time.Time // Time when the packet was delivered (independent when we process it)
|
||||||
|
response [][]byte // Response data of the peer (nil for timeouts)
|
||||||
|
dropped bool // Flag whether the peer dropped off early
|
||||||
|
} |
||||||
|
|
||||||
|
// timedOut returns if this request timed out.
|
||||||
|
func (req *stateReq) timedOut() bool { |
||||||
|
return req.response == nil |
||||||
|
} |
||||||
|
|
||||||
|
// stateSyncStats is a collection of progress stats to report during a state trie
|
||||||
|
// sync to RPC requests as well as to display in user logs.
|
||||||
|
type stateSyncStats struct { |
||||||
|
processed uint64 // Number of state entries processed
|
||||||
|
duplicate uint64 // Number of state entries downloaded twice
|
||||||
|
unexpected uint64 // Number of non-requested state entries received
|
||||||
|
pending uint64 // Number of still pending state entries
|
||||||
|
} |
||||||
|
|
||||||
|
// syncState starts downloading state with the given root hash.
|
||||||
|
func (d *Downloader) syncState(root common.Hash) *stateSync { |
||||||
|
// Create the state sync
|
||||||
|
s := newStateSync(d, root) |
||||||
|
select { |
||||||
|
case d.stateSyncStart <- s: |
||||||
|
// If we tell the statesync to restart with a new root, we also need
|
||||||
|
// to wait for it to actually also start -- when old requests have timed
|
||||||
|
// out or been delivered
|
||||||
|
<-s.started |
||||||
|
case <-d.quitCh: |
||||||
|
s.err = errCancelStateFetch |
||||||
|
close(s.done) |
||||||
|
} |
||||||
|
return s |
||||||
|
} |
||||||
|
|
||||||
|
// stateFetcher manages the active state sync and accepts requests
|
||||||
|
// on its behalf.
|
||||||
|
func (d *Downloader) stateFetcher() { |
||||||
|
for { |
||||||
|
select { |
||||||
|
case s := <-d.stateSyncStart: |
||||||
|
for next := s; next != nil; { |
||||||
|
next = d.runStateSync(next) |
||||||
|
} |
||||||
|
case <-d.stateCh: |
||||||
|
// Ignore state responses while no sync is running.
|
||||||
|
case <-d.quitCh: |
||||||
|
return |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// runStateSync runs a state synchronisation until it completes or another root
|
||||||
|
// hash is requested to be switched over to.
|
||||||
|
func (d *Downloader) runStateSync(s *stateSync) *stateSync { |
||||||
|
var ( |
||||||
|
active = make(map[string]*stateReq) // Currently in-flight requests
|
||||||
|
finished []*stateReq // Completed or failed requests
|
||||||
|
timeout = make(chan *stateReq) // Timed out active requests
|
||||||
|
) |
||||||
|
log.Trace("State sync starting", "root", s.root) |
||||||
|
|
||||||
|
defer func() { |
||||||
|
// Cancel active request timers on exit. Also set peers to idle so they're
|
||||||
|
// available for the next sync.
|
||||||
|
for _, req := range active { |
||||||
|
req.timer.Stop() |
||||||
|
req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) |
||||||
|
} |
||||||
|
}() |
||||||
|
go s.run() |
||||||
|
defer s.Cancel() |
||||||
|
|
||||||
|
// Listen for peer departure events to cancel assigned tasks
|
||||||
|
peerDrop := make(chan *peerConnection, 1024) |
||||||
|
peerSub := s.d.peers.SubscribePeerDrops(peerDrop) |
||||||
|
defer peerSub.Unsubscribe() |
||||||
|
|
||||||
|
for { |
||||||
|
// Enable sending of the first buffered element if there is one.
|
||||||
|
var ( |
||||||
|
deliverReq *stateReq |
||||||
|
deliverReqCh chan *stateReq |
||||||
|
) |
||||||
|
if len(finished) > 0 { |
||||||
|
deliverReq = finished[0] |
||||||
|
deliverReqCh = s.deliver |
||||||
|
} |
||||||
|
|
||||||
|
select { |
||||||
|
// The stateSync lifecycle:
|
||||||
|
case next := <-d.stateSyncStart: |
||||||
|
d.spindownStateSync(active, finished, timeout, peerDrop) |
||||||
|
return next |
||||||
|
|
||||||
|
case <-s.done: |
||||||
|
d.spindownStateSync(active, finished, timeout, peerDrop) |
||||||
|
return nil |
||||||
|
|
||||||
|
// Send the next finished request to the current sync:
|
||||||
|
case deliverReqCh <- deliverReq: |
||||||
|
// Shift out the first request, but also set the emptied slot to nil for GC
|
||||||
|
copy(finished, finished[1:]) |
||||||
|
finished[len(finished)-1] = nil |
||||||
|
finished = finished[:len(finished)-1] |
||||||
|
|
||||||
|
// Handle incoming state packs:
|
||||||
|
case pack := <-d.stateCh: |
||||||
|
// Discard any data not requested (or previously timed out)
|
||||||
|
req := active[pack.PeerId()] |
||||||
|
if req == nil { |
||||||
|
log.Debug("Unrequested node data", "peer", pack.PeerId(), "len", pack.Items()) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Finalize the request and queue up for processing
|
||||||
|
req.timer.Stop() |
||||||
|
req.response = pack.(*statePack).states |
||||||
|
req.delivered = time.Now() |
||||||
|
|
||||||
|
finished = append(finished, req) |
||||||
|
delete(active, pack.PeerId()) |
||||||
|
|
||||||
|
// Handle dropped peer connections:
|
||||||
|
case p := <-peerDrop: |
||||||
|
// Skip if no request is currently pending
|
||||||
|
req := active[p.id] |
||||||
|
if req == nil { |
||||||
|
continue |
||||||
|
} |
||||||
|
// Finalize the request and queue up for processing
|
||||||
|
req.timer.Stop() |
||||||
|
req.dropped = true |
||||||
|
req.delivered = time.Now() |
||||||
|
|
||||||
|
finished = append(finished, req) |
||||||
|
delete(active, p.id) |
||||||
|
|
||||||
|
// Handle timed-out requests:
|
||||||
|
case req := <-timeout: |
||||||
|
// If the peer is already requesting something else, ignore the stale timeout.
|
||||||
|
// This can happen when the timeout and the delivery happens simultaneously,
|
||||||
|
// causing both pathways to trigger.
|
||||||
|
if active[req.peer.id] != req { |
||||||
|
continue |
||||||
|
} |
||||||
|
req.delivered = time.Now() |
||||||
|
// Move the timed out data back into the download queue
|
||||||
|
finished = append(finished, req) |
||||||
|
delete(active, req.peer.id) |
||||||
|
|
||||||
|
// Track outgoing state requests:
|
||||||
|
case req := <-d.trackStateReq: |
||||||
|
// If an active request already exists for this peer, we have a problem. In
|
||||||
|
			// theory the trie node schedule must never assign two requests to the same
			// peer. In practice however, a peer might receive a request, disconnect and
			// immediately reconnect before the previous times out. In this case the first
			// request is never honored, alas we must not silently overwrite it, as that
			// causes valid requests to go missing and sync to get stuck.
			if old := active[req.peer.id]; old != nil {
				log.Warn("Busy peer assigned new state fetch", "peer", old.peer.id)
				// Move the previous request to the finished set
				old.timer.Stop()
				old.dropped = true
				old.delivered = time.Now()
				finished = append(finished, old)
			}
			// Start a timer to notify the sync loop if the peer stalled.
			req.timer = time.AfterFunc(req.timeout, func() {
				timeout <- req
			})
			active[req.peer.id] = req
		}
	}
}

// spindownStateSync 'drains' the outstanding requests; some will be delivered and others
// will time out. This is to ensure that when the next stateSync starts working, all peers
// are marked as idle and de facto _are_ idle.
func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {
	log.Trace("State sync spinning down", "active", len(active), "finished", len(finished))
	for len(active) > 0 {
		var (
			req    *stateReq
			reason string
		)
		select {
		// Handle (drop) incoming state packs:
		case pack := <-d.stateCh:
			req = active[pack.PeerId()]
			reason = "delivered"
		// Handle dropped peer connections:
		case p := <-peerDrop:
			req = active[p.id]
			reason = "peerdrop"
		// Handle timed-out requests:
		case req = <-timeout:
			reason = "timeout"
		}
		if req == nil {
			continue
		}
		req.peer.log.Trace("State peer marked idle (spindown)", "req.items", int(req.nItems), "reason", reason)
		req.timer.Stop()
		delete(active, req.peer.id)
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
	// The 'finished' set contains deliveries that we were going to pass to processing.
	// Those are now moot, but we still need to set those peers as idle, which would
	// otherwise have been done after processing
	for _, req := range finished {
		req.peer.SetNodeDataIdle(int(req.nItems), time.Now())
	}
}

// stateSync schedules requests for downloading a particular state trie defined
// by a given state root.
type stateSync struct {
	d *Downloader // Downloader instance to access and manage current peerset

	root   common.Hash        // State root currently being synced
	sched  *trie.Sync         // State trie sync scheduler defining the tasks
	keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with

	trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval
	codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval

	numUncommitted   int
	bytesUncommitted int

	started chan struct{} // Started is signalled once the sync loop starts

	deliver    chan *stateReq // Delivery channel multiplexing peer responses
	cancel     chan struct{}  // Channel to signal a termination request
	cancelOnce sync.Once      // Ensures cancel only ever gets called once
	done       chan struct{}  // Channel to signal termination completion
	err        error          // Any error hit during sync (set before completion)
}

// trieTask represents a single trie node download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type trieTask struct {
	path     [][]byte
	attempts map[string]struct{}
}

// codeTask represents a single byte code download task, containing a set of
// peers already attempted retrieval from to detect stalled syncs and abort.
type codeTask struct {
	attempts map[string]struct{}
}

// newStateSync creates a new state trie download scheduler. This method does not
// yet start the sync. The user needs to call run to initiate.
func newStateSync(d *Downloader, root common.Hash) *stateSync {
	return &stateSync{
		d:         d,
		root:      root,
		sched:     state.NewStateSync(root, d.stateDB, d.stateBloom, nil),
		keccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),
		trieTasks: make(map[common.Hash]*trieTask),
		codeTasks: make(map[common.Hash]*codeTask),
		deliver:   make(chan *stateReq),
		cancel:    make(chan struct{}),
		done:      make(chan struct{}),
		started:   make(chan struct{}),
	}
}

// run starts the task assignment and response processing loop, blocking until
// it finishes, and finally notifying any goroutines waiting for the loop to
// finish.
func (s *stateSync) run() {
	close(s.started)
	if s.d.snapSync {
		s.err = s.d.SnapSyncer.Sync(s.root, s.cancel)
	} else {
		s.err = s.loop()
	}
	close(s.done)
}

// Wait blocks until the sync is done or canceled.
func (s *stateSync) Wait() error {
	<-s.done
	return s.err
}

// Cancel cancels the sync and waits until it has shut down.
func (s *stateSync) Cancel() error {
	s.cancelOnce.Do(func() {
		close(s.cancel)
	})
	return s.Wait()
}
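
// Illustrative usage sketch of the lifecycle above (an assumed caller pattern,
// not an API defined here); only newStateSync, run, Wait and Cancel are real:
//
//	s := newStateSync(d, root) // schedule a sync of the trie rooted at root
//	go s.run()                 // drive task assignment and response processing
//	defer s.Cancel()           // idempotent thanks to cancelOnce
//	if err := s.Wait(); err != nil {
//	    log.Warn("State sync failed", "err", err) // assumed handling only
//	}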

// loop is the main event loop of a state trie sync. It is responsible for the
// assignment of new tasks to peers (including sending it to them) as well as
// for the processing of inbound data. Note that the loop does not directly
// receive data from peers, rather those are buffered up in the downloader and
// pushed here async. The reason is to decouple processing from data receipt
// and timeouts.
func (s *stateSync) loop() (err error) {
	// Listen for new peer events to assign tasks to them
	newPeer := make(chan *peerConnection, 1024)
	peerSub := s.d.peers.SubscribeNewPeers(newPeer)
	defer peerSub.Unsubscribe()
	defer func() {
		cerr := s.commit(true)
		if err == nil {
			err = cerr
		}
	}()

	// Keep assigning new tasks until the sync completes or aborts
	for s.sched.Pending() > 0 {
		if err = s.commit(false); err != nil {
			return err
		}
		s.assignTasks()
		// Tasks assigned, wait for something to happen
		select {
		case <-newPeer:
			// New peer arrived, try to assign it download tasks

		case <-s.cancel:
			return errCancelStateFetch

		case <-s.d.cancelCh:
			return errCanceled

		case req := <-s.deliver:
			// Response, disconnect or timeout triggered, drop the peer if stalling
			log.Trace("Received node data response", "peer", req.peer.id, "count", len(req.response), "dropped", req.dropped, "timeout", !req.dropped && req.timedOut())
			if req.nItems <= 2 && !req.dropped && req.timedOut() {
				// 2 items are the minimum requested, if even that times out, we've no use of
				// this peer at the moment.
				log.Warn("Stalling state sync, dropping peer", "peer", req.peer.id)
				if s.d.dropPeer == nil {
					// The dropPeer method is nil when `--copydb` is used for a local copy.
					// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
					req.peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", req.peer.id)
				} else {
					s.d.dropPeer(req.peer.id)

					// If this peer was the master peer, abort sync immediately
					s.d.cancelLock.RLock()
					master := req.peer.id == s.d.cancelPeer
					s.d.cancelLock.RUnlock()

					if master {
						s.d.cancel()
						return errTimeout
					}
				}
			}
			// Process all the received blobs and check for stale delivery
			delivered, err := s.process(req)
			req.peer.SetNodeDataIdle(delivered, req.delivered)
			if err != nil {
				log.Warn("Node data write error", "err", err)
				return err
			}
		}
	}
	return nil
}

func (s *stateSync) commit(force bool) error {
	if !force && s.bytesUncommitted < ethdb.IdealBatchSize {
		return nil
	}
	start := time.Now()
	b := s.d.stateDB.NewBatch()
	if err := s.sched.Commit(b); err != nil {
		return err
	}
	if err := b.Write(); err != nil {
		return fmt.Errorf("DB write error: %v", err)
	}
	s.updateStats(s.numUncommitted, 0, 0, time.Since(start))
	s.numUncommitted = 0
	s.bytesUncommitted = 0
	return nil
}
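
// Sketch of the batching rule commit applies: writes are buffered and only
// flushed once roughly ethdb.IdealBatchSize bytes have accumulated (or when
// forced at the end of the sync). The generic ethdb batch pattern it relies on
// looks like this (db is an assumed ethdb.Database, shown for illustration):
//
//	batch := db.NewBatch()
//	batch.Put(key, value) // buffered in memory
//	if batch.ValueSize() >= ethdb.IdealBatchSize {
//	    batch.Write() // flush the accumulated writes in one go
//	    batch.Reset()
//	}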

// assignTasks attempts to assign new tasks to all idle peers, either from the
// batch currently being retried, or fetching new data from the trie sync itself.
func (s *stateSync) assignTasks() {
	// Iterate over all idle peers and try to assign them state fetches
	peers, _ := s.d.peers.NodeDataIdlePeers()
	for _, p := range peers {
		// Assign a batch of fetches proportional to the estimated latency/bandwidth
		cap := p.NodeDataCapacity(s.d.peers.rates.TargetRoundTrip())
		req := &stateReq{peer: p, timeout: s.d.peers.rates.TargetTimeout()}

		nodes, _, codes := s.fillTasks(cap, req)

		// If the peer was assigned tasks to fetch, send the network request
		if len(nodes)+len(codes) > 0 {
			req.peer.log.Trace("Requesting batch of state data", "nodes", len(nodes), "codes", len(codes), "root", s.root)
			select {
			case s.d.trackStateReq <- req:
				req.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x
			case <-s.cancel:
			case <-s.d.cancelCh:
			}
		}
	}
}

// fillTasks fills the given request object with a maximum of n state download
// tasks to send to the remote peer.
func (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {
	// Refill available tasks from the scheduler.
	if fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {
		nodes, paths, codes := s.sched.Missing(fill)
		for i, hash := range nodes {
			s.trieTasks[hash] = &trieTask{
				path:     paths[i],
				attempts: make(map[string]struct{}),
			}
		}
		for _, hash := range codes {
			s.codeTasks[hash] = &codeTask{
				attempts: make(map[string]struct{}),
			}
		}
	}
	// Find tasks that haven't been tried with the request's peer. Prefer code
	// over trie nodes as those can be written to disk and forgotten about.
	nodes = make([]common.Hash, 0, n)
	paths = make([]trie.SyncPath, 0, n)
	codes = make([]common.Hash, 0, n)

	req.trieTasks = make(map[common.Hash]*trieTask, n)
	req.codeTasks = make(map[common.Hash]*codeTask, n)

	for hash, t := range s.codeTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}
		codes = append(codes, hash)
		req.codeTasks[hash] = t
		delete(s.codeTasks, hash)
	}
	for hash, t := range s.trieTasks {
		// Stop when we've gathered enough requests
		if len(nodes)+len(codes) == n {
			break
		}
		// Skip any requests we've already tried from this peer
		if _, ok := t.attempts[req.peer.id]; ok {
			continue
		}
		// Assign the request to this peer
		t.attempts[req.peer.id] = struct{}{}

		nodes = append(nodes, hash)
		paths = append(paths, t.path)

		req.trieTasks[hash] = t
		delete(s.trieTasks, hash)
	}
	req.nItems = uint16(len(nodes) + len(codes))
	return nodes, paths, codes
}
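
// Toy illustration of the selection above (the cap of 3 and the task counts are
// made up): with two untried bytecode tasks and two untried trie-node tasks
// queued, the request is filled with both code hashes and a single trie node,
// since delivered bytecode can be persisted and forgotten immediately.
//
//	nodes, paths, codes := s.fillTasks(3, req)
//	// len(codes) == 2, len(nodes) == len(paths) == 1, req.nItems == 3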

// process iterates over a batch of delivered state data, injecting each item
// into a running state sync, re-queuing any items that were requested but not
// delivered. Returns whether the peer actually managed to deliver anything of
// value, and any error that occurred.
func (s *stateSync) process(req *stateReq) (int, error) {
	// Collect processing stats and update progress if valid data was received
	duplicate, unexpected, successful := 0, 0, 0

	defer func(start time.Time) {
		if duplicate > 0 || unexpected > 0 {
			s.updateStats(0, duplicate, unexpected, time.Since(start))
		}
	}(time.Now())

	// Iterate over all the delivered data and inject one-by-one into the trie
	for _, blob := range req.response {
		hash, err := s.processNodeData(blob)
		switch err {
		case nil:
			s.numUncommitted++
			s.bytesUncommitted += len(blob)
			successful++
		case trie.ErrNotRequested:
			unexpected++
		case trie.ErrAlreadyProcessed:
			duplicate++
		default:
			return successful, fmt.Errorf("invalid state node %s: %v", hash.TerminalString(), err)
		}
		// Delete from both queues (one delivery is enough for the syncer)
		delete(req.trieTasks, hash)
		delete(req.codeTasks, hash)
	}
	// Put unfulfilled tasks back into the retry queue
	npeers := s.d.peers.Len()
	for hash, task := range req.trieTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("trie node %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.trieTasks[hash] = task
	}
	for hash, task := range req.codeTasks {
		// If the node did deliver something, missing items may be due to a protocol
		// limit or a previous timeout + delayed delivery. Both cases should permit
		// the node to retry the missing items (to avoid single-peer stalls).
		if len(req.response) > 0 || req.timedOut() {
			delete(task.attempts, req.peer.id)
		}
		// If we've requested the node too many times already, it may be a malicious
		// sync where nobody has the right data. Abort.
		if len(task.attempts) >= npeers {
			return successful, fmt.Errorf("byte code %s failed with all peers (%d tries, %d peers)", hash.TerminalString(), len(task.attempts), npeers)
		}
		// Missing item, place into the retry queue.
		s.codeTasks[hash] = task
	}
	return successful, nil
}

// processNodeData tries to inject a trie node data blob delivered from a remote
// peer into the state trie, returning whether anything useful was written or any
// error occurred.
func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {
	res := trie.SyncResult{Data: blob}
	s.keccak.Reset()
	s.keccak.Write(blob)
	s.keccak.Read(res.Hash[:])
	err := s.sched.Process(res)
	return res.Hash, err
}
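
// Content addressing is what makes a delivery verifiable: the scheduler keyed
// the request by the keccak256 hash of the expected node, so hashing the blob
// both identifies and authenticates it. A rough equivalent of the hashing above,
// using the helper from the crypto package (shown only for illustration):
//
//	hash := crypto.Keccak256Hash(blob) // equals res.Hash computed via s.keccak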

// updateStats bumps the various state sync progress counters and displays a log
// message for the user to see.
func (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {
	s.d.syncStatsLock.Lock()
	defer s.d.syncStatsLock.Unlock()

	s.d.syncStatsState.pending = uint64(s.sched.Pending())
	s.d.syncStatsState.processed += uint64(written)
	s.d.syncStatsState.duplicate += uint64(duplicate)
	s.d.syncStatsState.unexpected += uint64(unexpected)

	if written > 0 || duplicate > 0 || unexpected > 0 {
		log.Info("Imported new state entries", "count", written, "elapsed", common.PrettyDuration(duration), "processed", s.d.syncStatsState.processed, "pending", s.d.syncStatsState.pending, "trieretry", len(s.trieTasks), "coderetry", len(s.codeTasks), "duplicate", s.d.syncStatsState.duplicate, "unexpected", s.d.syncStatsState.unexpected)
	}
	if written > 0 {
		rawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)
	}
}
@ -0,0 +1,230 @@
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package downloader |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math/big" |
||||||
|
"sync" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||||
|
"github.com/ethereum/go-ethereum/core" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/params" |
||||||
|
) |
||||||
|
|
||||||
|
// Test chain parameters.
|
||||||
|
var ( |
||||||
|
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") |
||||||
|
testAddress = crypto.PubkeyToAddress(testKey.PublicKey) |
||||||
|
testDB = rawdb.NewMemoryDatabase() |
||||||
|
testGenesis = core.GenesisBlockForTesting(testDB, testAddress, big.NewInt(1000000000000000)) |
||||||
|
) |
||||||
|
|
||||||
|
// The common prefix of all test chains:
|
||||||
|
var testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) |
||||||
|
|
||||||
|
// Different forks on top of the base chain:
|
||||||
|
var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain |
||||||
|
|
||||||
|
func init() { |
||||||
|
var forkLen = int(fullMaxForkAncestry + 50) |
||||||
|
var wg sync.WaitGroup |
||||||
|
wg.Add(3) |
||||||
|
go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() |
||||||
|
go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() |
||||||
|
go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() |
||||||
|
wg.Wait() |
||||||
|
} |
||||||
|
|
||||||
|
type testChain struct { |
||||||
|
genesis *types.Block |
||||||
|
chain []common.Hash |
||||||
|
headerm map[common.Hash]*types.Header |
||||||
|
blockm map[common.Hash]*types.Block |
||||||
|
receiptm map[common.Hash][]*types.Receipt |
||||||
|
tdm map[common.Hash]*big.Int |
||||||
|
} |
||||||
|
|
||||||
|
// newTestChain creates a blockchain of the given length.
|
||||||
|
func newTestChain(length int, genesis *types.Block) *testChain { |
||||||
|
tc := new(testChain).copy(length) |
||||||
|
tc.genesis = genesis |
||||||
|
tc.chain = append(tc.chain, genesis.Hash()) |
||||||
|
tc.headerm[tc.genesis.Hash()] = tc.genesis.Header() |
||||||
|
tc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty() |
||||||
|
tc.blockm[tc.genesis.Hash()] = tc.genesis |
||||||
|
tc.generate(length-1, 0, genesis, false) |
||||||
|
return tc |
||||||
|
} |
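
// Illustrative only (mirrors what the init function above does): build a short
// chain and a heavier fork on top of it; the lengths and seed are example values.
//
//	base := newTestChain(128, testGenesis)
//	fork := base.makeFork(32, true, 1) // heavy fork, coinbase/seed byte 1
//	_ = fork.headBlock()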
||||||
|
|
||||||
|
// makeFork creates a fork on top of the test chain.
|
||||||
|
func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { |
||||||
|
fork := tc.copy(tc.len() + length) |
||||||
|
fork.generate(length, seed, tc.headBlock(), heavy) |
||||||
|
return fork |
||||||
|
} |
||||||
|
|
||||||
|
// shorten creates a copy of the chain with the given length. It panics if the
|
||||||
|
// length is longer than the number of available blocks.
|
||||||
|
func (tc *testChain) shorten(length int) *testChain { |
||||||
|
if length > tc.len() { |
||||||
|
panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, tc.len())) |
||||||
|
} |
||||||
|
return tc.copy(length) |
||||||
|
} |
||||||
|
|
||||||
|
func (tc *testChain) copy(newlen int) *testChain { |
||||||
|
cpy := &testChain{ |
||||||
|
genesis: tc.genesis, |
||||||
|
headerm: make(map[common.Hash]*types.Header, newlen), |
||||||
|
blockm: make(map[common.Hash]*types.Block, newlen), |
||||||
|
receiptm: make(map[common.Hash][]*types.Receipt, newlen), |
||||||
|
tdm: make(map[common.Hash]*big.Int, newlen), |
||||||
|
} |
||||||
|
for i := 0; i < len(tc.chain) && i < newlen; i++ { |
||||||
|
hash := tc.chain[i] |
||||||
|
cpy.chain = append(cpy.chain, tc.chain[i]) |
||||||
|
cpy.tdm[hash] = tc.tdm[hash] |
||||||
|
cpy.blockm[hash] = tc.blockm[hash] |
||||||
|
cpy.headerm[hash] = tc.headerm[hash] |
||||||
|
cpy.receiptm[hash] = tc.receiptm[hash] |
||||||
|
} |
||||||
|
return cpy |
||||||
|
} |
||||||
|
|
||||||
|
// generate creates a chain of n blocks starting at and including parent. The
// returned hash chain is ordered head->parent. In addition, every 22nd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
|
||||||
|
func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { |
||||||
|
// start := time.Now()
|
||||||
|
// defer func() { fmt.Printf("test chain generated in %v\n", time.Since(start)) }()
|
||||||
|
|
||||||
|
blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { |
||||||
|
block.SetCoinbase(common.Address{seed}) |
||||||
|
// If a heavy chain is requested, delay blocks to raise difficulty
|
||||||
|
if heavy { |
||||||
|
block.OffsetTime(-1) |
||||||
|
} |
||||||
|
// Include transactions to the miner to make blocks more interesting.
|
||||||
|
if parent == tc.genesis && i%22 == 0 { |
||||||
|
signer := types.MakeSigner(params.TestChainConfig, block.Number()) |
||||||
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
block.AddTx(tx) |
||||||
|
} |
||||||
|
// if the block number is a multiple of 5, add a bonus uncle to the block
|
||||||
|
if i > 0 && i%5 == 0 { |
||||||
|
block.AddUncle(&types.Header{ |
||||||
|
ParentHash: block.PrevBlock(i - 1).Hash(), |
||||||
|
Number: big.NewInt(block.Number().Int64() - 1), |
||||||
|
}) |
||||||
|
} |
||||||
|
}) |
||||||
|
|
||||||
|
// Convert the block-chain into a hash-chain and header/block maps
|
||||||
|
td := new(big.Int).Set(tc.td(parent.Hash())) |
||||||
|
for i, b := range blocks { |
||||||
|
td := td.Add(td, b.Difficulty()) |
||||||
|
hash := b.Hash() |
||||||
|
tc.chain = append(tc.chain, hash) |
||||||
|
tc.blockm[hash] = b |
||||||
|
tc.headerm[hash] = b.Header() |
||||||
|
tc.receiptm[hash] = receipts[i] |
||||||
|
tc.tdm[hash] = new(big.Int).Set(td) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// len returns the total number of blocks in the chain.
|
||||||
|
func (tc *testChain) len() int { |
||||||
|
return len(tc.chain) |
||||||
|
} |
||||||
|
|
||||||
|
// headBlock returns the head of the chain.
|
||||||
|
func (tc *testChain) headBlock() *types.Block { |
||||||
|
return tc.blockm[tc.chain[len(tc.chain)-1]] |
||||||
|
} |
||||||
|
|
||||||
|
// td returns the total difficulty of the given block.
|
||||||
|
func (tc *testChain) td(hash common.Hash) *big.Int { |
||||||
|
return tc.tdm[hash] |
||||||
|
} |
||||||
|
|
||||||
|
// headersByHash returns headers in order from the given hash.
|
||||||
|
func (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header { |
||||||
|
num, _ := tc.hashToNumber(origin) |
||||||
|
return tc.headersByNumber(num, amount, skip, reverse) |
||||||
|
} |
||||||
|
|
||||||
|
// headersByNumber returns headers from the given number.
|
||||||
|
func (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header { |
||||||
|
result := make([]*types.Header, 0, amount) |
||||||
|
|
||||||
|
if !reverse { |
||||||
|
for num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 { |
||||||
|
if header, ok := tc.headerm[tc.chain[int(num)]]; ok { |
||||||
|
result = append(result, header) |
||||||
|
} |
||||||
|
} |
||||||
|
} else { |
||||||
|
for num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 { |
||||||
|
if header, ok := tc.headerm[tc.chain[int(num)]]; ok { |
||||||
|
result = append(result, header) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
return result |
||||||
|
} |
||||||
|
|
||||||
|
// receipts returns the receipts of the given block hashes.
|
||||||
|
func (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt { |
||||||
|
results := make([][]*types.Receipt, 0, len(hashes)) |
||||||
|
for _, hash := range hashes { |
||||||
|
if receipt, ok := tc.receiptm[hash]; ok { |
||||||
|
results = append(results, receipt) |
||||||
|
} |
||||||
|
} |
||||||
|
return results |
||||||
|
} |
||||||
|
|
||||||
|
// bodies returns the block bodies of the given block hashes.
|
||||||
|
func (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) { |
||||||
|
transactions := make([][]*types.Transaction, 0, len(hashes)) |
||||||
|
uncles := make([][]*types.Header, 0, len(hashes)) |
||||||
|
for _, hash := range hashes { |
||||||
|
if block, ok := tc.blockm[hash]; ok { |
||||||
|
transactions = append(transactions, block.Transactions()) |
||||||
|
uncles = append(uncles, block.Uncles()) |
||||||
|
} |
||||||
|
} |
||||||
|
return transactions, uncles |
||||||
|
} |
||||||
|
|
||||||
|
func (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) { |
||||||
|
for num, hash := range tc.chain { |
||||||
|
if hash == target { |
||||||
|
return uint64(num), true |
||||||
|
} |
||||||
|
} |
||||||
|
return 0, false |
||||||
|
} |
@ -0,0 +1,79 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// dataPack is a data message returned by a peer for some query.
type dataPack interface {
	PeerId() string
	Items() int
	Stats() string
}

// headerPack is a batch of block headers returned by a peer.
type headerPack struct {
	peerID  string
	headers []*types.Header
}

func (p *headerPack) PeerId() string { return p.peerID }
func (p *headerPack) Items() int     { return len(p.headers) }
func (p *headerPack) Stats() string  { return fmt.Sprintf("%d", len(p.headers)) }

// bodyPack is a batch of block bodies returned by a peer.
type bodyPack struct {
	peerID       string
	transactions [][]*types.Transaction
	uncles       [][]*types.Header
}

func (p *bodyPack) PeerId() string { return p.peerID }
func (p *bodyPack) Items() int {
	if len(p.transactions) <= len(p.uncles) {
		return len(p.transactions)
	}
	return len(p.uncles)
}
func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactions), len(p.uncles)) }

// receiptPack is a batch of receipts returned by a peer.
type receiptPack struct {
	peerID   string
	receipts [][]*types.Receipt
}

func (p *receiptPack) PeerId() string { return p.peerID }
func (p *receiptPack) Items() int     { return len(p.receipts) }
func (p *receiptPack) Stats() string  { return fmt.Sprintf("%d", len(p.receipts)) }

// statePack is a batch of states returned by a peer.
type statePack struct {
	peerID string
	states [][]byte
}

func (p *statePack) PeerId() string { return p.peerID }
func (p *statePack) Items() int     { return len(p.states) }
func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) }
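
// Any batched peer response can be routed through the downloader by satisfying
// dataPack. A hypothetical extra pack type, shown only to illustrate the shape
// of the interface (namePack does not exist in this package):
//
//	type namePack struct {
//	    peerID string
//	    names  []string
//	}
//
//	func (p *namePack) PeerId() string { return p.peerID }
//	func (p *namePack) Items() int     { return len(p.names) }
//	func (p *namePack) Stats() string  { return fmt.Sprintf("%d", len(p.names)) }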
@ -0,0 +1,889 @@ |
|||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
// This is a temporary package whilst working on the eth/66 blocking refactors.
|
||||||
|
// After that work is done, les needs to be refactored to use the new package,
|
||||||
|
// or alternatively use a stripped down version of it. Either way, we need to
|
||||||
|
// keep the changes scoped so duplicating temporarily seems the sanest.
|
||||||
|
package fetcher |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"math/rand" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/common/prque" |
||||||
|
"github.com/ethereum/go-ethereum/consensus" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/metrics" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
lightTimeout = time.Millisecond // Time allowance before an announced header is explicitly requested
|
||||||
|
arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
|
||||||
|
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
|
||||||
|
fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
|
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
maxUncleDist = 7 // Maximum allowed backward distance from the chain head
|
||||||
|
maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
|
||||||
|
hashLimit = 256 // Maximum number of unique blocks or headers a peer may have announced
|
||||||
|
blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
|
||||||
|
) |
||||||
|
|
||||||
|
var ( |
||||||
|
blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil) |
||||||
|
blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil) |
||||||
|
blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil) |
||||||
|
blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil) |
||||||
|
|
||||||
|
blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil) |
||||||
|
blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil) |
||||||
|
blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil) |
||||||
|
blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil) |
||||||
|
|
||||||
|
headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil) |
||||||
|
bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil) |
||||||
|
|
||||||
|
headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil) |
||||||
|
headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil) |
||||||
|
bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil) |
||||||
|
bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil) |
||||||
|
) |
||||||
|
|
||||||
|
var errTerminated = errors.New("terminated") |
||||||
|
|
||||||
|
// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
|
||||||
|
type HeaderRetrievalFn func(common.Hash) *types.Header |
||||||
|
|
||||||
|
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
|
||||||
|
type blockRetrievalFn func(common.Hash) *types.Block |
||||||
|
|
||||||
|
// headerRequesterFn is a callback type for sending a header retrieval request.
|
||||||
|
type headerRequesterFn func(common.Hash) error |
||||||
|
|
||||||
|
// bodyRequesterFn is a callback type for sending a body retrieval request.
|
||||||
|
type bodyRequesterFn func([]common.Hash) error |
||||||
|
|
||||||
|
// headerVerifierFn is a callback type to verify a block's header for fast propagation.
|
||||||
|
type headerVerifierFn func(header *types.Header) error |
||||||
|
|
||||||
|
// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
|
||||||
|
type blockBroadcasterFn func(block *types.Block, propagate bool) |
||||||
|
|
||||||
|
// chainHeightFn is a callback type to retrieve the current chain height.
|
||||||
|
type chainHeightFn func() uint64 |
||||||
|
|
||||||
|
// headersInsertFn is a callback type to insert a batch of headers into the local chain.
|
||||||
|
type headersInsertFn func(headers []*types.Header) (int, error) |
||||||
|
|
||||||
|
// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
|
||||||
|
type chainInsertFn func(types.Blocks) (int, error) |
||||||
|
|
||||||
|
// peerDropFn is a callback type for dropping a peer detected as malicious.
|
||||||
|
type peerDropFn func(id string) |
||||||
|
|
||||||
|
// blockAnnounce is the hash notification of the availability of a new block in the
|
||||||
|
// network.
|
||||||
|
type blockAnnounce struct { |
||||||
|
hash common.Hash // Hash of the block being announced
|
||||||
|
number uint64 // Number of the block being announced (0 = unknown | old protocol)
|
||||||
|
header *types.Header // Header of the block partially reassembled (new protocol)
|
||||||
|
time time.Time // Timestamp of the announcement
|
||||||
|
|
||||||
|
origin string // Identifier of the peer originating the notification
|
||||||
|
|
||||||
|
fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
|
||||||
|
fetchBodies bodyRequesterFn // Fetcher function to retrieve the body of an announced block
|
||||||
|
} |
||||||
|
|
||||||
|
// headerFilterTask represents a batch of headers needing fetcher filtering.
|
||||||
|
type headerFilterTask struct { |
||||||
|
peer string // The source peer of block headers
|
||||||
|
headers []*types.Header // Collection of headers to filter
|
||||||
|
time time.Time // Arrival time of the headers
|
||||||
|
} |
||||||
|
|
||||||
|
// bodyFilterTask represents a batch of block bodies (transactions and uncles)
|
||||||
|
// needing fetcher filtering.
|
||||||
|
type bodyFilterTask struct { |
||||||
|
peer string // The source peer of block bodies
|
||||||
|
transactions [][]*types.Transaction // Collection of transactions per block bodies
|
||||||
|
uncles [][]*types.Header // Collection of uncles per block bodies
|
||||||
|
time time.Time // Arrival time of the blocks' contents
|
||||||
|
} |
||||||
|
|
||||||
|
// blockOrHeaderInject represents a scheduled import operation.
|
||||||
|
type blockOrHeaderInject struct { |
||||||
|
origin string |
||||||
|
|
||||||
|
header *types.Header // Used for light mode fetcher which only cares about header.
|
||||||
|
block *types.Block // Used for normal mode fetcher which imports full block.
|
||||||
|
} |
||||||
|
|
||||||
|
// number returns the block number of the injected object.
|
||||||
|
func (inject *blockOrHeaderInject) number() uint64 { |
||||||
|
if inject.header != nil { |
||||||
|
return inject.header.Number.Uint64() |
||||||
|
} |
||||||
|
return inject.block.NumberU64() |
||||||
|
} |
||||||
|
|
||||||
|
// hash returns the block hash of the injected object.
|
||||||
|
func (inject *blockOrHeaderInject) hash() common.Hash { |
||||||
|
if inject.header != nil { |
||||||
|
return inject.header.Hash() |
||||||
|
} |
||||||
|
return inject.block.Hash() |
||||||
|
} |
||||||
|
|
||||||
|
// BlockFetcher is responsible for accumulating block announcements from various peers
|
||||||
|
// and scheduling them for retrieval.
|
||||||
|
type BlockFetcher struct { |
||||||
|
light bool // The indicator whether it's a light fetcher or normal one.
|
||||||
|
|
||||||
|
// Various event channels
|
||||||
|
notify chan *blockAnnounce |
||||||
|
inject chan *blockOrHeaderInject |
||||||
|
|
||||||
|
headerFilter chan chan *headerFilterTask |
||||||
|
bodyFilter chan chan *bodyFilterTask |
||||||
|
|
||||||
|
done chan common.Hash |
||||||
|
quit chan struct{} |
||||||
|
|
||||||
|
// Announce states
|
||||||
|
announces map[string]int // Per peer blockAnnounce counts to prevent memory exhaustion
|
||||||
|
announced map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
|
||||||
|
fetching map[common.Hash]*blockAnnounce // Announced blocks, currently fetching
|
||||||
|
fetched map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
|
||||||
|
completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing
|
||||||
|
|
||||||
|
// Block cache
|
||||||
|
queue *prque.Prque // Queue containing the import operations (block number sorted)
|
||||||
|
queues map[string]int // Per peer block counts to prevent memory exhaustion
|
||||||
|
queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
|
||||||
|
|
||||||
|
// Callbacks
|
||||||
|
getHeader HeaderRetrievalFn // Retrieves a header from the local chain
|
||||||
|
getBlock blockRetrievalFn // Retrieves a block from the local chain
|
||||||
|
verifyHeader headerVerifierFn // Checks if a block's headers have a valid proof of work
|
||||||
|
broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
|
||||||
|
chainHeight chainHeightFn // Retrieves the current chain's height
|
||||||
|
insertHeaders headersInsertFn // Injects a batch of headers into the chain
|
||||||
|
insertChain chainInsertFn // Injects a batch of blocks into the chain
|
||||||
|
dropPeer peerDropFn // Drops a peer for misbehaving
|
||||||
|
|
||||||
|
// Testing hooks
|
||||||
|
announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
|
||||||
|
queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
|
||||||
|
fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
|
||||||
|
completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
|
||||||
|
importedHook func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
|
||||||
|
} |
||||||
|
|
||||||
|
// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
|
||||||
|
func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher { |
||||||
|
return &BlockFetcher{ |
||||||
|
light: light, |
||||||
|
notify: make(chan *blockAnnounce), |
||||||
|
inject: make(chan *blockOrHeaderInject), |
||||||
|
headerFilter: make(chan chan *headerFilterTask), |
||||||
|
bodyFilter: make(chan chan *bodyFilterTask), |
||||||
|
done: make(chan common.Hash), |
||||||
|
quit: make(chan struct{}), |
||||||
|
announces: make(map[string]int), |
||||||
|
announced: make(map[common.Hash][]*blockAnnounce), |
||||||
|
fetching: make(map[common.Hash]*blockAnnounce), |
||||||
|
fetched: make(map[common.Hash][]*blockAnnounce), |
||||||
|
completing: make(map[common.Hash]*blockAnnounce), |
||||||
|
queue: prque.New(nil), |
||||||
|
queues: make(map[string]int), |
||||||
|
queued: make(map[common.Hash]*blockOrHeaderInject), |
||||||
|
getHeader: getHeader, |
||||||
|
getBlock: getBlock, |
||||||
|
verifyHeader: verifyHeader, |
||||||
|
broadcastBlock: broadcastBlock, |
||||||
|
chainHeight: chainHeight, |
||||||
|
insertHeaders: insertHeaders, |
||||||
|
insertChain: insertChain, |
||||||
|
dropPeer: dropPeer, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Start boots up the announcement based synchroniser, accepting and processing
|
||||||
|
// hash notifications and block fetches until termination requested.
|
||||||
|
func (f *BlockFetcher) Start() { |
||||||
|
go f.loop() |
||||||
|
} |
||||||
|
|
||||||
|
// Stop terminates the announcement based synchroniser, canceling all pending
|
||||||
|
// operations.
|
||||||
|
func (f *BlockFetcher) Stop() { |
||||||
|
close(f.quit) |
||||||
|
} |
||||||
|
|
||||||
|
// Notify announces the fetcher of the potential availability of a new block in
|
||||||
|
// the network.
|
||||||
|
func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, |
||||||
|
headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { |
||||||
|
block := &blockAnnounce{ |
||||||
|
hash: hash, |
||||||
|
number: number, |
||||||
|
time: time, |
||||||
|
origin: peer, |
||||||
|
fetchHeader: headerFetcher, |
||||||
|
fetchBodies: bodyFetcher, |
||||||
|
} |
||||||
|
select { |
||||||
|
case f.notify <- block: |
||||||
|
return nil |
||||||
|
case <-f.quit: |
||||||
|
return errTerminated |
||||||
|
} |
||||||
|
} |
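
// Sketch of how a peer handler would feed announcements into the fetcher; the
// peer object and its request helpers are assumptions for illustration, not
// part of this package:
//
//	err := f.Notify(peerID, hash, number, time.Now(),
//	    func(h common.Hash) error { return peer.RequestOneHeader(h) },
//	    func(hs []common.Hash) error { return peer.RequestBodies(hs) })
//	if err == errTerminated {
//	    return // fetcher already shut down, drop the announcement
//	}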
||||||
|
|
||||||
|
// Enqueue tries to fill gaps in the fetcher's future import queue.
|
||||||
|
func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { |
||||||
|
op := &blockOrHeaderInject{ |
||||||
|
origin: peer, |
||||||
|
block: block, |
||||||
|
} |
||||||
|
select { |
||||||
|
case f.inject <- op: |
||||||
|
return nil |
||||||
|
case <-f.quit: |
||||||
|
return errTerminated |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
|
||||||
|
// returning those that should be handled differently.
|
||||||
|
func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { |
||||||
|
log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) |
||||||
|
|
||||||
|
// Send the filter channel to the fetcher
|
||||||
|
filter := make(chan *headerFilterTask) |
||||||
|
|
||||||
|
select { |
||||||
|
case f.headerFilter <- filter: |
||||||
|
case <-f.quit: |
||||||
|
return nil |
||||||
|
} |
||||||
|
// Request the filtering of the header list
|
||||||
|
select { |
||||||
|
case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: |
||||||
|
case <-f.quit: |
||||||
|
return nil |
||||||
|
} |
||||||
|
// Retrieve the headers remaining after filtering
|
||||||
|
select { |
||||||
|
case task := <-filter: |
||||||
|
return task.headers |
||||||
|
case <-f.quit: |
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// FilterBodies extracts all the block bodies that were explicitly requested by
|
||||||
|
// the fetcher, returning those that should be handled differently.
|
||||||
|
func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { |
||||||
|
log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles)) |
||||||
|
|
||||||
|
// Send the filter channel to the fetcher
|
||||||
|
filter := make(chan *bodyFilterTask) |
||||||
|
|
||||||
|
select { |
||||||
|
case f.bodyFilter <- filter: |
||||||
|
case <-f.quit: |
||||||
|
return nil, nil |
||||||
|
} |
||||||
|
// Request the filtering of the body list
|
||||||
|
select { |
||||||
|
case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}: |
||||||
|
case <-f.quit: |
||||||
|
return nil, nil |
||||||
|
} |
||||||
|
// Retrieve the bodies remaining after filtering
|
||||||
|
select { |
||||||
|
case task := <-filter: |
||||||
|
return task.transactions, task.uncles |
||||||
|
case <-f.quit: |
||||||
|
return nil, nil |
||||||
|
} |
||||||
|
} |
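
// The two filter methods above share a small channel handshake: the caller first
// registers a private reply channel with the fetcher, then sends the task on it,
// then reads back whatever the fetcher did not claim. Condensed sketch (the
// quit-handling selects are omitted for brevity):
//
//	filter := make(chan *bodyFilterTask)
//	f.bodyFilter <- filter // register the reply channel
//	filter <- task         // hand over the delivered bodies
//	task = <-filter        // receive the unclaimed remainder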
||||||
|
|
||||||
|
// loop is the main fetcher loop, checking and processing various notification
|
||||||
|
// events.
|
||||||
|
func (f *BlockFetcher) loop() { |
||||||
|
// Iterate the block fetching until a quit is requested
|
||||||
|
var ( |
||||||
|
fetchTimer = time.NewTimer(0) |
||||||
|
completeTimer = time.NewTimer(0) |
||||||
|
) |
||||||
|
<-fetchTimer.C // clear out the channel
|
||||||
|
<-completeTimer.C |
||||||
|
defer fetchTimer.Stop() |
||||||
|
defer completeTimer.Stop() |
||||||
|
|
||||||
|
for { |
||||||
|
// Clean up any expired block fetches
|
||||||
|
for hash, announce := range f.fetching { |
||||||
|
if time.Since(announce.time) > fetchTimeout { |
||||||
|
f.forgetHash(hash) |
||||||
|
} |
||||||
|
} |
||||||
|
// Import any queued blocks that could potentially fit
|
||||||
|
height := f.chainHeight() |
||||||
|
for !f.queue.Empty() { |
||||||
|
op := f.queue.PopItem().(*blockOrHeaderInject) |
||||||
|
hash := op.hash() |
||||||
|
if f.queueChangeHook != nil { |
||||||
|
f.queueChangeHook(hash, false) |
||||||
|
} |
||||||
|
// If too high up the chain or phase, continue later
|
||||||
|
number := op.number() |
||||||
|
if number > height+1 { |
||||||
|
f.queue.Push(op, -int64(number)) |
||||||
|
if f.queueChangeHook != nil { |
||||||
|
f.queueChangeHook(hash, true) |
||||||
|
} |
||||||
|
break |
||||||
|
} |
||||||
|
// Otherwise if fresh and still unknown, try and import
|
||||||
|
if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) { |
||||||
|
f.forgetBlock(hash) |
||||||
|
continue |
||||||
|
} |
||||||
|
if f.light { |
||||||
|
f.importHeaders(op.origin, op.header) |
||||||
|
} else { |
||||||
|
f.importBlocks(op.origin, op.block) |
||||||
|
} |
||||||
|
} |
||||||
|
// Wait for an outside event to occur
|
||||||
|
select { |
||||||
|
case <-f.quit: |
||||||
|
// BlockFetcher terminating, abort all operations
|
||||||
|
return |
||||||
|
|
||||||
|
case notification := <-f.notify: |
||||||
|
// A block was announced, make sure the peer isn't DOSing us
|
||||||
|
blockAnnounceInMeter.Mark(1) |
||||||
|
|
||||||
|
count := f.announces[notification.origin] + 1 |
||||||
|
if count > hashLimit { |
||||||
|
log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit) |
||||||
|
blockAnnounceDOSMeter.Mark(1) |
||||||
|
break |
||||||
|
} |
||||||
|
// If we have a valid block number, check that it's potentially useful
|
||||||
|
if notification.number > 0 { |
||||||
|
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { |
||||||
|
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist) |
||||||
|
blockAnnounceDropMeter.Mark(1) |
||||||
|
break |
||||||
|
} |
||||||
|
} |
||||||
|
// All is well, schedule the announce if block's not yet downloading
|
||||||
|
if _, ok := f.fetching[notification.hash]; ok { |
||||||
|
break |
||||||
|
} |
||||||
|
if _, ok := f.completing[notification.hash]; ok { |
||||||
|
break |
||||||
|
} |
||||||
|
f.announces[notification.origin] = count |
||||||
|
f.announced[notification.hash] = append(f.announced[notification.hash], notification) |
||||||
|
if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 { |
||||||
|
f.announceChangeHook(notification.hash, true) |
||||||
|
} |
||||||
|
if len(f.announced) == 1 { |
||||||
|
f.rescheduleFetch(fetchTimer) |
||||||
|
} |
||||||
|
|
||||||
|
case op := <-f.inject: |
||||||
|
// A direct block insertion was requested, try and fill any pending gaps
|
||||||
|
blockBroadcastInMeter.Mark(1) |
||||||
|
|
||||||
|
// Now only direct block injection is allowed, drop the header injection
// here silently if we receive one.
|
||||||
|
if f.light { |
||||||
|
continue |
||||||
|
} |
||||||
|
f.enqueue(op.origin, nil, op.block) |
||||||
|
|
||||||
|
case hash := <-f.done: |
||||||
|
// A pending import finished, remove all traces of the notification
|
||||||
|
f.forgetHash(hash) |
||||||
|
f.forgetBlock(hash) |
||||||
|
|
||||||
|
case <-fetchTimer.C: |
||||||
|
// At least one block's timer ran out, check for needing retrieval
|
||||||
|
request := make(map[string][]common.Hash) |
||||||
|
|
||||||
|
for hash, announces := range f.announced { |
||||||
|
// In the current LES protocol (les2/les3), only header announce is
|
||||||
|
// available, no need to wait too much time for header broadcast.
|
||||||
|
timeout := arriveTimeout - gatherSlack |
||||||
|
if f.light { |
||||||
|
timeout = 0 |
||||||
|
} |
||||||
|
if time.Since(announces[0].time) > timeout { |
||||||
|
// Pick a random peer to retrieve from, reset all others
|
||||||
|
announce := announces[rand.Intn(len(announces))] |
||||||
|
f.forgetHash(hash) |
||||||
|
|
||||||
|
// If the block still didn't arrive, queue for fetching
|
||||||
|
if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) { |
||||||
|
request[announce.origin] = append(request[announce.origin], hash) |
||||||
|
f.fetching[hash] = announce |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
// Send out all block header requests
|
||||||
|
for peer, hashes := range request { |
||||||
|
log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes) |
||||||
|
|
||||||
|
// Create a closure of the fetch and schedule it on a new thread
|
||||||
|
fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes |
||||||
|
go func() { |
||||||
|
if f.fetchingHook != nil { |
||||||
|
f.fetchingHook(hashes) |
||||||
|
} |
||||||
|
for _, hash := range hashes { |
||||||
|
headerFetchMeter.Mark(1) |
||||||
|
fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
|
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
// Schedule the next fetch if blocks are still pending
|
||||||
|
f.rescheduleFetch(fetchTimer) |
||||||
|
|
||||||
|
case <-completeTimer.C: |
||||||
|
// At least one header's timer ran out, retrieve everything
|
||||||
|
request := make(map[string][]common.Hash) |
||||||
|
|
||||||
|
for hash, announces := range f.fetched { |
||||||
|
// Pick a random peer to retrieve from, reset all others
|
||||||
|
announce := announces[rand.Intn(len(announces))] |
||||||
|
f.forgetHash(hash) |
||||||
|
|
||||||
|
// If the block still didn't arrive, queue for completion
|
||||||
|
if f.getBlock(hash) == nil { |
||||||
|
request[announce.origin] = append(request[announce.origin], hash) |
||||||
|
f.completing[hash] = announce |
||||||
|
} |
||||||
|
} |
||||||
|
// Send out all block body requests
|
||||||
|
for peer, hashes := range request { |
||||||
|
log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes) |
||||||
|
|
||||||
|
// Create a closure of the fetch and schedule it on a new thread
|
||||||
|
if f.completingHook != nil { |
||||||
|
f.completingHook(hashes) |
||||||
|
} |
||||||
|
bodyFetchMeter.Mark(int64(len(hashes))) |
||||||
|
go f.completing[hashes[0]].fetchBodies(hashes) |
||||||
|
} |
||||||
|
// Schedule the next fetch if blocks are still pending
|
||||||
|
f.rescheduleComplete(completeTimer) |
||||||
|
|
||||||
|
case filter := <-f.headerFilter: |
||||||
|
// Headers arrived from a remote peer. Extract those that were explicitly
|
||||||
|
// requested by the fetcher, and return everything else so it's delivered
|
||||||
|
// to other parts of the system.
|
||||||
|
var task *headerFilterTask |
||||||
|
select { |
||||||
|
case task = <-filter: |
||||||
|
case <-f.quit: |
||||||
|
return |
||||||
|
} |
||||||
|
headerFilterInMeter.Mark(int64(len(task.headers))) |
||||||
|
|
||||||
|
// Split the batch of headers into unknown ones (to return to the caller),
|
||||||
|
// known incomplete ones (requiring body retrievals) and completed blocks.
|
||||||
|
unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{} |
||||||
|
for _, header := range task.headers { |
||||||
|
hash := header.Hash() |
||||||
|
|
||||||
|
// Filter fetcher-requested headers from other synchronisation algorithms
|
||||||
|
if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil { |
||||||
|
// If the delivered header does not match the promised number, drop the announcer
|
||||||
|
if header.Number.Uint64() != announce.number { |
||||||
|
log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number) |
||||||
|
f.dropPeer(announce.origin) |
||||||
|
f.forgetHash(hash) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Collect all headers only if we are running in light
|
||||||
|
// mode and the headers are not imported by other means.
|
||||||
|
if f.light { |
||||||
|
if f.getHeader(hash) == nil { |
||||||
|
announce.header = header |
||||||
|
lightHeaders = append(lightHeaders, announce) |
||||||
|
} |
||||||
|
f.forgetHash(hash) |
||||||
|
continue |
||||||
|
} |
||||||
|
// Only keep if not imported by other means
|
||||||
|
if f.getBlock(hash) == nil { |
||||||
|
announce.header = header |
||||||
|
announce.time = task.time |
||||||
|
|
||||||
|
// If the block is empty (header only), short circuit into the final import queue
|
||||||
|
if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash { |
||||||
|
log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) |
||||||
|
|
||||||
|
block := types.NewBlockWithHeader(header) |
||||||
|
block.ReceivedAt = task.time |
||||||
|
|
||||||
|
complete = append(complete, block) |
||||||
|
f.completing[hash] = announce |
||||||
|
continue |
||||||
|
} |
||||||
|
// Otherwise add to the list of blocks needing completion
|
||||||
|
incomplete = append(incomplete, announce) |
||||||
|
} else { |
||||||
|
log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash()) |
||||||
|
f.forgetHash(hash) |
||||||
|
} |
||||||
|
} else { |
||||||
|
// BlockFetcher doesn't know about it, add to the return list
|
||||||
|
unknown = append(unknown, header) |
||||||
|
} |
||||||
|
} |
||||||
|
headerFilterOutMeter.Mark(int64(len(unknown))) |
||||||
|
select { |
||||||
|
case filter <- &headerFilterTask{headers: unknown, time: task.time}: |
||||||
|
case <-f.quit: |
||||||
|
return |
||||||
|
} |
||||||
|
// Schedule the retrieved headers for body completion
|
||||||
|
for _, announce := range incomplete { |
||||||
|
hash := announce.header.Hash() |
||||||
|
if _, ok := f.completing[hash]; ok { |
||||||
|
continue |
||||||
|
} |
||||||
|
f.fetched[hash] = append(f.fetched[hash], announce) |
||||||
|
if len(f.fetched) == 1 { |
||||||
|
f.rescheduleComplete(completeTimer) |
||||||
|
} |
||||||
|
} |
||||||
|
// Schedule the header for light fetcher import
|
||||||
|
for _, announce := range lightHeaders { |
||||||
|
f.enqueue(announce.origin, announce.header, nil) |
||||||
|
} |
||||||
|
// Schedule the header-only blocks for import
|
||||||
|
for _, block := range complete { |
||||||
|
if announce := f.completing[block.Hash()]; announce != nil { |
||||||
|
f.enqueue(announce.origin, nil, block) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
case filter := <-f.bodyFilter: |
||||||
|
// Block bodies arrived, extract any explicitly requested blocks, return the rest
|
||||||
|
var task *bodyFilterTask |
||||||
|
select { |
||||||
|
case task = <-filter: |
||||||
|
case <-f.quit: |
||||||
|
return |
||||||
|
} |
||||||
|
bodyFilterInMeter.Mark(int64(len(task.transactions))) |
||||||
|
blocks := []*types.Block{} |
||||||
|
// Abort early if there's nothing explicitly requested
|
||||||
|
if len(f.completing) > 0 { |
||||||
|
for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ { |
||||||
|
// Match up a body to any possible completion request
|
||||||
|
var ( |
||||||
|
matched = false |
||||||
|
uncleHash common.Hash // calculated lazily and reused
|
||||||
|
txnHash common.Hash // calculated lazily and reused
|
||||||
|
) |
||||||
|
for hash, announce := range f.completing { |
||||||
|
if f.queued[hash] != nil || announce.origin != task.peer { |
||||||
|
continue |
||||||
|
} |
||||||
|
if uncleHash == (common.Hash{}) { |
||||||
|
uncleHash = types.CalcUncleHash(task.uncles[i]) |
||||||
|
} |
||||||
|
if uncleHash != announce.header.UncleHash { |
||||||
|
continue |
||||||
|
} |
||||||
|
if txnHash == (common.Hash{}) { |
||||||
|
txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil)) |
||||||
|
} |
||||||
|
if txnHash != announce.header.TxHash { |
||||||
|
continue |
||||||
|
} |
||||||
|
// Mark the body matched, reassemble if still unknown
|
||||||
|
matched = true |
||||||
|
if f.getBlock(hash) == nil { |
||||||
|
block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i]) |
||||||
|
block.ReceivedAt = task.time |
||||||
|
blocks = append(blocks, block) |
||||||
|
} else { |
||||||
|
f.forgetHash(hash) |
||||||
|
} |
||||||
|
|
||||||
|
} |
||||||
|
if matched { |
||||||
|
task.transactions = append(task.transactions[:i], task.transactions[i+1:]...) |
||||||
|
task.uncles = append(task.uncles[:i], task.uncles[i+1:]...) |
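// The matched body was spliced out of the task, so revisit the same index on the next iteration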
||||||
|
i-- |
||||||
|
continue |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
bodyFilterOutMeter.Mark(int64(len(task.transactions))) |
||||||
|
select { |
||||||
|
case filter <- task: |
||||||
|
case <-f.quit: |
||||||
|
return |
||||||
|
} |
||||||
|
// Schedule the retrieved blocks for ordered import
|
||||||
|
for _, block := range blocks { |
||||||
|
if announce := f.completing[block.Hash()]; announce != nil { |
||||||
|
f.enqueue(announce.origin, nil, block) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
|
||||||
|
func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) { |
||||||
|
// Short circuit if no blocks are announced
|
||||||
|
if len(f.announced) == 0 { |
||||||
|
return |
||||||
|
} |
||||||
|
// Schedule announcement retrieval quickly for light mode
|
||||||
|
// since the server won't send any headers to the client.
|
||||||
|
if f.light { |
||||||
|
fetch.Reset(lightTimeout) |
||||||
|
return |
||||||
|
} |
||||||
|
// Otherwise find the earliest expiring announcement
|
||||||
|
earliest := time.Now() |
||||||
|
for _, announces := range f.announced { |
||||||
|
if earliest.After(announces[0].time) { |
||||||
|
earliest = announces[0].time |
||||||
|
} |
||||||
|
} |
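// Fire once the oldest announcement becomes arriveTimeout old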
||||||
|
fetch.Reset(arriveTimeout - time.Since(earliest)) |
||||||
|
} |
||||||
|
|
||||||
|
// rescheduleComplete resets the specified completion timer to the next fetch timeout.
|
||||||
|
func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) { |
||||||
|
// Short circuit if no headers are fetched
|
||||||
|
if len(f.fetched) == 0 { |
||||||
|
return |
||||||
|
} |
||||||
|
// Otherwise find the earliest expiring announcement
|
||||||
|
earliest := time.Now() |
||||||
|
for _, announces := range f.fetched { |
||||||
|
if earliest.After(announces[0].time) { |
||||||
|
earliest = announces[0].time |
||||||
|
} |
||||||
|
} |
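// Fire once the oldest fetched header has waited gatherSlack, letting further body retrievals batch up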
||||||
|
complete.Reset(gatherSlack - time.Since(earliest)) |
||||||
|
} |
||||||
|
|
||||||
|
// enqueue schedules a new header or block import operation, if the component
|
||||||
|
// to be imported has not yet been seen.
|
||||||
|
func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) { |
||||||
|
var ( |
||||||
|
hash common.Hash |
||||||
|
number uint64 |
||||||
|
) |
||||||
|
if header != nil { |
||||||
|
hash, number = header.Hash(), header.Number.Uint64() |
||||||
|
} else { |
||||||
|
hash, number = block.Hash(), block.NumberU64() |
||||||
|
} |
||||||
|
// Ensure the peer isn't DOSing us
|
||||||
|
count := f.queues[peer] + 1 |
||||||
|
if count > blockLimit { |
||||||
|
log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit) |
||||||
|
blockBroadcastDOSMeter.Mark(1) |
||||||
|
f.forgetHash(hash) |
||||||
|
return |
||||||
|
} |
||||||
|
// Discard any past or too distant blocks
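// (i.e. more than maxUncleDist behind or maxQueueDist ahead of the current chain head)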
|
||||||
|
if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { |
||||||
|
log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist) |
||||||
|
blockBroadcastDropMeter.Mark(1) |
||||||
|
f.forgetHash(hash) |
||||||
|
return |
||||||
|
} |
||||||
|
// Schedule the block for future importing
|
||||||
|
if _, ok := f.queued[hash]; !ok { |
||||||
|
op := &blockOrHeaderInject{origin: peer} |
||||||
|
if header != nil { |
||||||
|
op.header = header |
||||||
|
} else { |
||||||
|
op.block = block |
||||||
|
} |
||||||
|
f.queues[peer] = count |
||||||
|
f.queued[hash] = op |
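// Use the negated block number as priority so lower numbers are imported first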
||||||
|
f.queue.Push(op, -int64(number)) |
||||||
|
if f.queueChangeHook != nil { |
||||||
|
f.queueChangeHook(hash, true) |
||||||
|
} |
||||||
|
log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size()) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// importHeaders spawns a new goroutine to run a header insertion into the chain.
|
||||||
|
// If the header's number is at the same height as the current import phase, it
|
||||||
|
// updates the phase states accordingly.
|
||||||
|
func (f *BlockFetcher) importHeaders(peer string, header *types.Header) { |
||||||
|
hash := header.Hash() |
||||||
|
log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash) |
||||||
|
|
||||||
|
go func() { |
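// Signal the main event loop once this import attempt finishes, whatever the outcome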
||||||
|
defer func() { f.done <- hash }() |
||||||
|
// If the parent's unknown, abort insertion
|
||||||
|
parent := f.getHeader(header.ParentHash) |
||||||
|
if parent == nil { |
||||||
|
log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash) |
||||||
|
return |
||||||
|
} |
||||||
|
// Validate the header and if something went wrong, drop the peer
|
||||||
|
if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock { |
||||||
|
log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) |
||||||
|
f.dropPeer(peer) |
||||||
|
return |
||||||
|
} |
||||||
|
// Run the actual import and log any issues
|
||||||
|
if _, err := f.insertHeaders([]*types.Header{header}); err != nil { |
||||||
|
log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err) |
||||||
|
return |
||||||
|
} |
||||||
|
// Invoke the testing hook if needed
|
||||||
|
if f.importedHook != nil { |
||||||
|
f.importedHook(header, nil) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
// importBlocks spawns a new goroutine to run a block insertion into the chain. If the
|
||||||
|
// block's number is at the same height as the current import phase, it updates
|
||||||
|
// the phase states accordingly.
|
||||||
|
func (f *BlockFetcher) importBlocks(peer string, block *types.Block) { |
||||||
|
hash := block.Hash() |
||||||
|
|
||||||
|
// Run the import on a new thread
|
||||||
|
log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash) |
||||||
|
go func() { |
||||||
|
defer func() { f.done <- hash }() |
||||||
|
|
||||||
|
// If the parent's unknown, abort insertion
|
||||||
|
parent := f.getBlock(block.ParentHash()) |
||||||
|
if parent == nil { |
||||||
|
log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash()) |
||||||
|
return |
||||||
|
} |
||||||
|
// Quickly validate the header and propagate the block if it passes
|
||||||
|
switch err := f.verifyHeader(block.Header()); err { |
||||||
|
case nil: |
||||||
|
// All ok, quickly propagate to our peers
|
||||||
|
blockBroadcastOutTimer.UpdateSince(block.ReceivedAt) |
||||||
|
go f.broadcastBlock(block, true) |
||||||
|
|
||||||
|
case consensus.ErrFutureBlock: |
||||||
|
// Weird future block, don't fail, but neither propagate
|
||||||
|
|
||||||
|
default: |
||||||
|
// Something went very wrong, drop the peer
|
||||||
|
log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) |
||||||
|
f.dropPeer(peer) |
||||||
|
return |
||||||
|
} |
||||||
|
// Run the actual import and log any issues
|
||||||
|
if _, err := f.insertChain(types.Blocks{block}); err != nil { |
||||||
|
log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err) |
||||||
|
return |
||||||
|
} |
||||||
|
// If import succeeded, broadcast the block
|
||||||
|
blockAnnounceOutTimer.UpdateSince(block.ReceivedAt) |
||||||
|
go f.broadcastBlock(block, false) |
||||||
|
|
||||||
|
// Invoke the testing hook if needed
|
||||||
|
if f.importedHook != nil { |
||||||
|
f.importedHook(nil, block) |
||||||
|
} |
||||||
|
}() |
||||||
|
} |
||||||
|
|
||||||
|
// forgetHash removes all traces of a block announcement from the fetcher's
|
||||||
|
// internal state.
|
||||||
|
func (f *BlockFetcher) forgetHash(hash common.Hash) { |
||||||
|
// Remove all pending announces and decrement DOS counters
|
||||||
|
if announceMap, ok := f.announced[hash]; ok { |
||||||
|
for _, announce := range announceMap { |
||||||
|
f.announces[announce.origin]-- |
||||||
|
if f.announces[announce.origin] <= 0 { |
||||||
|
delete(f.announces, announce.origin) |
||||||
|
} |
||||||
|
} |
||||||
|
delete(f.announced, hash) |
||||||
|
if f.announceChangeHook != nil { |
||||||
|
f.announceChangeHook(hash, false) |
||||||
|
} |
||||||
|
} |
||||||
|
// Remove any pending fetches and decrement the DOS counters
|
||||||
|
if announce := f.fetching[hash]; announce != nil { |
||||||
|
f.announces[announce.origin]-- |
||||||
|
if f.announces[announce.origin] <= 0 { |
||||||
|
delete(f.announces, announce.origin) |
||||||
|
} |
||||||
|
delete(f.fetching, hash) |
||||||
|
} |
||||||
|
|
||||||
|
// Remove any pending completion requests and decrement the DOS counters
|
||||||
|
for _, announce := range f.fetched[hash] { |
||||||
|
f.announces[announce.origin]-- |
||||||
|
if f.announces[announce.origin] <= 0 { |
||||||
|
delete(f.announces, announce.origin) |
||||||
|
} |
||||||
|
} |
||||||
|
delete(f.fetched, hash) |
||||||
|
|
||||||
|
// Remove any pending completions and decrement the DOS counters
|
||||||
|
if announce := f.completing[hash]; announce != nil { |
||||||
|
f.announces[announce.origin]-- |
||||||
|
if f.announces[announce.origin] <= 0 { |
||||||
|
delete(f.announces, announce.origin) |
||||||
|
} |
||||||
|
delete(f.completing, hash) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// forgetBlock removes all traces of a queued block from the fetcher's internal
|
||||||
|
// state.
|
||||||
|
func (f *BlockFetcher) forgetBlock(hash common.Hash) { |
||||||
|
if insert := f.queued[hash]; insert != nil { |
||||||
|
f.queues[insert.origin]-- |
||||||
|
if f.queues[insert.origin] == 0 { |
||||||
|
delete(f.queues, insert.origin) |
||||||
|
} |
||||||
|
delete(f.queued, hash) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1,896 @@ |
|||||||
|
// Copyright 2015 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package fetcher |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"math/big" |
||||||
|
"sync" |
||||||
|
"sync/atomic" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||||
|
"github.com/ethereum/go-ethereum/core" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/core/types" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/params" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
var ( |
||||||
|
testdb = rawdb.NewMemoryDatabase() |
||||||
|
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") |
||||||
|
testAddress = crypto.PubkeyToAddress(testKey.PublicKey) |
||||||
|
genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000000000)) |
||||||
|
unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit, BaseFee: big.NewInt(params.InitialBaseFee)}, nil, nil, nil, trie.NewStackTrie(nil)) |
||||||
|
) |
||||||
|
|
||||||
|
// makeChain creates a chain of n blocks starting at and including parent.
|
||||||
|
// The returned hash chain is ordered head->parent. In addition, every 3rd block
|
||||||
|
// contains a transaction and every 5th an uncle to allow testing correct block
|
||||||
|
// reassembly.
|
||||||
|
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) { |
||||||
|
blocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) { |
||||||
|
block.SetCoinbase(common.Address{seed}) |
||||||
|
|
||||||
|
// If the block number is multiple of 3, send a bonus transaction to the miner
|
||||||
|
if parent == genesis && i%3 == 0 { |
||||||
|
signer := types.MakeSigner(params.TestChainConfig, block.Number()) |
||||||
|
tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
block.AddTx(tx) |
||||||
|
} |
||||||
|
// If the block number is a multiple of 5, add a bonus uncle to the block
|
||||||
|
if i%5 == 0 { |
||||||
|
block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))}) |
||||||
|
} |
||||||
|
}) |
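// Assemble the hash index: the head ends up at position 0, the parent hash at the tail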
||||||
|
hashes := make([]common.Hash, n+1) |
||||||
|
hashes[len(hashes)-1] = parent.Hash() |
||||||
|
blockm := make(map[common.Hash]*types.Block, n+1) |
||||||
|
blockm[parent.Hash()] = parent |
||||||
|
for i, b := range blocks { |
||||||
|
hashes[len(hashes)-i-2] = b.Hash() |
||||||
|
blockm[b.Hash()] = b |
||||||
|
} |
||||||
|
return hashes, blockm |
||||||
|
} |
||||||
|
|
||||||
|
// fetcherTester is a test simulator for mocking out the local block chain.
|
||||||
|
type fetcherTester struct { |
||||||
|
fetcher *BlockFetcher |
||||||
|
|
||||||
|
hashes []common.Hash // Hash chain belonging to the tester
|
||||||
|
headers map[common.Hash]*types.Header // Headers belonging to the tester
|
||||||
|
blocks map[common.Hash]*types.Block // Blocks belonging to the tester
|
||||||
|
drops map[string]bool // Map of peers dropped by the fetcher
|
||||||
|
|
||||||
|
lock sync.RWMutex |
||||||
|
} |
||||||
|
|
||||||
|
// newTester creates a new fetcher test mocker.
|
||||||
|
func newTester(light bool) *fetcherTester { |
||||||
|
tester := &fetcherTester{ |
||||||
|
hashes: []common.Hash{genesis.Hash()}, |
||||||
|
headers: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()}, |
||||||
|
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis}, |
||||||
|
drops: make(map[string]bool), |
||||||
|
} |
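// Wire every chain callback back into the tester so tests can observe imports, heights and drops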
||||||
|
tester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer) |
||||||
|
tester.fetcher.Start() |
||||||
|
|
||||||
|
return tester |
||||||
|
} |
||||||
|
|
||||||
|
// getHeader retrieves a header from the tester's block chain.
|
||||||
|
func (f *fetcherTester) getHeader(hash common.Hash) *types.Header { |
||||||
|
f.lock.RLock() |
||||||
|
defer f.lock.RUnlock() |
||||||
|
|
||||||
|
return f.headers[hash] |
||||||
|
} |
||||||
|
|
||||||
|
// getBlock retrieves a block from the tester's block chain.
|
||||||
|
func (f *fetcherTester) getBlock(hash common.Hash) *types.Block { |
||||||
|
f.lock.RLock() |
||||||
|
defer f.lock.RUnlock() |
||||||
|
|
||||||
|
return f.blocks[hash] |
||||||
|
} |
||||||
|
|
||||||
|
// verifyHeader is a nop placeholder for the block header verification.
|
||||||
|
func (f *fetcherTester) verifyHeader(header *types.Header) error { |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// broadcastBlock is a nop placeholder for the block broadcasting.
|
||||||
|
func (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) { |
||||||
|
} |
||||||
|
|
||||||
|
// chainHeight retrieves the current height (block number) of the chain.
|
||||||
|
func (f *fetcherTester) chainHeight() uint64 { |
||||||
|
f.lock.RLock() |
||||||
|
defer f.lock.RUnlock() |
||||||
|
|
||||||
|
if f.fetcher.light { |
||||||
|
return f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() |
||||||
|
} |
||||||
|
return f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() |
||||||
|
} |
||||||
|
|
||||||
|
// insertHeaders injects new headers into the simulated chain.
|
||||||
|
func (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) { |
||||||
|
f.lock.Lock() |
||||||
|
defer f.lock.Unlock() |
||||||
|
|
||||||
|
for i, header := range headers { |
||||||
|
// Make sure the parent is known
|
||||||
|
if _, ok := f.headers[header.ParentHash]; !ok { |
||||||
|
return i, errors.New("unknown parent") |
||||||
|
} |
||||||
|
// Discard any new blocks if the same height already exists
|
||||||
|
if header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() { |
||||||
|
return i, nil |
||||||
|
} |
||||||
|
// Otherwise build our current chain
|
||||||
|
f.hashes = append(f.hashes, header.Hash()) |
||||||
|
f.headers[header.Hash()] = header |
||||||
|
} |
||||||
|
return 0, nil |
||||||
|
} |
||||||
|
|
||||||
|
// insertChain injects new blocks into the simulated chain.
|
||||||
|
func (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) { |
||||||
|
f.lock.Lock() |
||||||
|
defer f.lock.Unlock() |
||||||
|
|
||||||
|
for i, block := range blocks { |
||||||
|
// Make sure the parent is known
|
||||||
|
if _, ok := f.blocks[block.ParentHash()]; !ok { |
||||||
|
return i, errors.New("unknown parent") |
||||||
|
} |
||||||
|
// Discard any new blocks if the same height already exists
|
||||||
|
if block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() { |
||||||
|
return i, nil |
||||||
|
} |
||||||
|
// Otherwise build our current chain
|
||||||
|
f.hashes = append(f.hashes, block.Hash()) |
||||||
|
f.blocks[block.Hash()] = block |
||||||
|
} |
||||||
|
return 0, nil |
||||||
|
} |
||||||
|
|
||||||
|
// dropPeer is an emulator for the peer removal, simply accumulating the various
|
||||||
|
// peers dropped by the fetcher.
|
||||||
|
func (f *fetcherTester) dropPeer(peer string) { |
||||||
|
f.lock.Lock() |
||||||
|
defer f.lock.Unlock() |
||||||
|
|
||||||
|
f.drops[peer] = true |
||||||
|
} |
||||||
|
|
||||||
|
// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.
|
||||||
|
func (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn { |
||||||
|
closure := make(map[common.Hash]*types.Block) |
||||||
|
for hash, block := range blocks { |
||||||
|
closure[hash] = block |
||||||
|
} |
||||||
|
// Create a function that returns a header from the closure
|
||||||
|
return func(hash common.Hash) error { |
||||||
|
// Gather the blocks to return
|
||||||
|
headers := make([]*types.Header, 0, 1) |
||||||
|
if block, ok := closure[hash]; ok { |
||||||
|
headers = append(headers, block.Header()) |
||||||
|
} |
||||||
|
// Return on a new thread
|
||||||
|
go f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift)) |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.
|
||||||
|
func (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn { |
||||||
|
closure := make(map[common.Hash]*types.Block) |
||||||
|
for hash, block := range blocks { |
||||||
|
closure[hash] = block |
||||||
|
} |
||||||
|
// Create a function that returns blocks from the closure
|
||||||
|
return func(hashes []common.Hash) error { |
||||||
|
// Gather the block bodies to return
|
||||||
|
transactions := make([][]*types.Transaction, 0, len(hashes)) |
||||||
|
uncles := make([][]*types.Header, 0, len(hashes)) |
||||||
|
|
||||||
|
for _, hash := range hashes { |
||||||
|
if block, ok := closure[hash]; ok { |
||||||
|
transactions = append(transactions, block.Transactions()) |
||||||
|
uncles = append(uncles, block.Uncles()) |
||||||
|
} |
||||||
|
} |
||||||
|
// Return on a new thread
|
||||||
|
go f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift)) |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// verifyFetchingEvent verifies that a single event arrives on a fetching channel.
|
||||||
|
func verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) { |
||||||
|
if arrive { |
||||||
|
select { |
||||||
|
case <-fetching: |
||||||
|
case <-time.After(time.Second): |
||||||
|
t.Fatalf("fetching timeout") |
||||||
|
} |
||||||
|
} else { |
||||||
|
select { |
||||||
|
case <-fetching: |
||||||
|
t.Fatalf("fetching invoked") |
||||||
|
case <-time.After(10 * time.Millisecond): |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// verifyCompletingEvent verifies that a single event arrives on a completing channel.
|
||||||
|
func verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) { |
||||||
|
if arrive { |
||||||
|
select { |
||||||
|
case <-completing: |
||||||
|
case <-time.After(time.Second): |
||||||
|
t.Fatalf("completing timeout") |
||||||
|
} |
||||||
|
} else { |
||||||
|
select { |
||||||
|
case <-completing: |
||||||
|
t.Fatalf("completing invoked") |
||||||
|
case <-time.After(10 * time.Millisecond): |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// verifyImportEvent verifies that a single event arrives on an import channel.
|
||||||
|
func verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) { |
||||||
|
if arrive { |
||||||
|
select { |
||||||
|
case <-imported: |
||||||
|
case <-time.After(time.Second): |
||||||
|
t.Fatalf("import timeout") |
||||||
|
} |
||||||
|
} else { |
||||||
|
select { |
||||||
|
case <-imported: |
||||||
|
t.Fatalf("import invoked") |
||||||
|
case <-time.After(20 * time.Millisecond): |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// verifyImportCount verifies that exactly count events arrive on an
|
||||||
|
// import hook channel.
|
||||||
|
func verifyImportCount(t *testing.T, imported chan interface{}, count int) { |
||||||
|
for i := 0; i < count; i++ { |
||||||
|
select { |
||||||
|
case <-imported: |
||||||
|
case <-time.After(time.Second): |
||||||
|
t.Fatalf("block %d: import timeout", i+1) |
||||||
|
} |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
} |
||||||
|
|
||||||
|
// verifyImportDone verifies that no more events are arriving on an import channel.
|
||||||
|
func verifyImportDone(t *testing.T, imported chan interface{}) { |
||||||
|
select { |
||||||
|
case <-imported: |
||||||
|
t.Fatalf("extra block imported") |
||||||
|
case <-time.After(50 * time.Millisecond): |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// verifyChainHeight verifies the chain height is as expected.
|
||||||
|
func verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) { |
||||||
|
if fetcher.chainHeight() != height { |
||||||
|
t.Fatalf("chain height mismatch, got %d, want %d", fetcher.chainHeight(), height) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that a fetcher accepts block/header announcements and initiates retrievals
|
||||||
|
// for them, successfully importing into the local chain.
|
||||||
|
func TestFullSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, false) } |
||||||
|
func TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) } |
||||||
|
|
||||||
|
func testSequentialAnnouncements(t *testing.T, light bool) { |
||||||
|
// Create a chain of blocks to import
|
||||||
|
targetBlocks := 4 * hashLimit |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
|
||||||
|
tester := newTester(light) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
// Iteratively announce blocks until all are imported
|
||||||
|
imported := make(chan interface{}) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if light { |
||||||
|
if header == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty header") |
||||||
|
} |
||||||
|
imported <- header |
||||||
|
} else { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
} |
||||||
|
for i := len(hashes) - 2; i >= 0; i-- { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
verifyChainHeight(t, tester, uint64(len(hashes)-1)) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that if blocks are announced by multiple peers (or even the same buggy
|
||||||
|
// peer), they will only get downloaded at most once.
|
||||||
|
func TestFullConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, false) } |
||||||
|
func TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) } |
||||||
|
|
||||||
|
func testConcurrentAnnouncements(t *testing.T, light bool) { |
||||||
|
// Create a chain of blocks to import
|
||||||
|
targetBlocks := 4 * hashLimit |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
|
||||||
|
// Assemble a tester with a built-in counter for the requests
|
||||||
|
tester := newTester(light) |
||||||
|
firstHeaderFetcher := tester.makeHeaderFetcher("first", blocks, -gatherSlack) |
||||||
|
firstBodyFetcher := tester.makeBodyFetcher("first", blocks, 0) |
||||||
|
secondHeaderFetcher := tester.makeHeaderFetcher("second", blocks, -gatherSlack) |
||||||
|
secondBodyFetcher := tester.makeBodyFetcher("second", blocks, 0) |
||||||
|
|
||||||
|
counter := uint32(0) |
||||||
|
firstHeaderWrapper := func(hash common.Hash) error { |
||||||
|
atomic.AddUint32(&counter, 1) |
||||||
|
return firstHeaderFetcher(hash) |
||||||
|
} |
||||||
|
secondHeaderWrapper := func(hash common.Hash) error { |
||||||
|
atomic.AddUint32(&counter, 1) |
||||||
|
return secondHeaderFetcher(hash) |
||||||
|
} |
||||||
|
// Iteratively announce blocks until all are imported
|
||||||
|
imported := make(chan interface{}) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if light { |
||||||
|
if header == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty header") |
||||||
|
} |
||||||
|
imported <- header |
||||||
|
} else { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
} |
||||||
|
for i := len(hashes) - 2; i >= 0; i-- { |
||||||
|
tester.fetcher.Notify("first", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher) |
||||||
|
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher) |
||||||
|
tester.fetcher.Notify("second", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher) |
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
|
||||||
|
// Make sure no blocks were retrieved twice
|
||||||
|
if int(counter) != targetBlocks { |
||||||
|
t.Fatalf("retrieval count mismatch: have %v, want %v", counter, targetBlocks) |
||||||
|
} |
||||||
|
verifyChainHeight(t, tester, uint64(len(hashes)-1)) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that an announcement arriving while a previous one is being fetched still
|
||||||
|
// results in a valid import.
|
||||||
|
func TestFullOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, false) } |
||||||
|
func TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) } |
||||||
|
|
||||||
|
func testOverlappingAnnouncements(t *testing.T, light bool) { |
||||||
|
// Create a chain of blocks to import
|
||||||
|
targetBlocks := 4 * hashLimit |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
|
||||||
|
tester := newTester(light) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
// Iteratively announce blocks, but overlap them continuously
|
||||||
|
overlap := 16 |
||||||
|
imported := make(chan interface{}, len(hashes)-1) |
||||||
|
for i := 0; i < overlap; i++ { |
||||||
|
imported <- nil |
||||||
|
} |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if light { |
||||||
|
if header == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty header") |
||||||
|
} |
||||||
|
imported <- header |
||||||
|
} else { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
for i := len(hashes) - 2; i >= 0; i-- { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
select { |
||||||
|
case <-imported: |
||||||
|
case <-time.After(time.Second): |
||||||
|
t.Fatalf("block %d: import timeout", len(hashes)-i) |
||||||
|
} |
||||||
|
} |
||||||
|
// Wait for all the imports to complete and check count
|
||||||
|
verifyImportCount(t, imported, overlap) |
||||||
|
verifyChainHeight(t, tester, uint64(len(hashes)-1)) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that announces already being retrieved will not be duplicated.
|
||||||
|
func TestFullPendingDeduplication(t *testing.T) { testPendingDeduplication(t, false) } |
||||||
|
func TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) } |
||||||
|
|
||||||
|
func testPendingDeduplication(t *testing.T, light bool) { |
||||||
|
// Create a hash and corresponding block
|
||||||
|
hashes, blocks := makeChain(1, 0, genesis) |
||||||
|
|
||||||
|
// Assemble a tester with a built-in counter and a delayed fetcher
|
||||||
|
tester := newTester(light) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("repeater", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("repeater", blocks, 0) |
||||||
|
|
||||||
|
delay := 50 * time.Millisecond |
||||||
|
counter := uint32(0) |
||||||
|
headerWrapper := func(hash common.Hash) error { |
||||||
|
atomic.AddUint32(&counter, 1) |
||||||
|
|
||||||
|
// Simulate a long running fetch
|
||||||
|
go func() { |
||||||
|
time.Sleep(delay) |
||||||
|
headerFetcher(hash) |
||||||
|
}() |
||||||
|
return nil |
||||||
|
} |
||||||
|
checkNonExist := func() bool { |
||||||
|
return tester.getBlock(hashes[0]) == nil |
||||||
|
} |
||||||
|
if light { |
||||||
|
checkNonExist = func() bool { |
||||||
|
return tester.getHeader(hashes[0]) == nil |
||||||
|
} |
||||||
|
} |
||||||
|
// Announce the same block many times until it's fetched (wait for any pending ops)
|
||||||
|
for checkNonExist() { |
||||||
|
tester.fetcher.Notify("repeater", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher) |
||||||
|
time.Sleep(time.Millisecond) |
||||||
|
} |
||||||
|
time.Sleep(delay) |
||||||
|
|
||||||
|
// Check that all blocks were imported and none fetched twice
|
||||||
|
if int(counter) != 1 { |
||||||
|
t.Fatalf("retrieval count mismatch: have %v, want %v", counter, 1) |
||||||
|
} |
||||||
|
verifyChainHeight(t, tester, 1) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that announcements retrieved in a random order are cached and eventually
|
||||||
|
// imported when all the gaps are filled in.
|
||||||
|
func TestFullRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, false) } |
||||||
|
func TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) } |
||||||
|
|
||||||
|
func testRandomArrivalImport(t *testing.T, light bool) { |
||||||
|
// Create a chain of blocks to import, and choose one to delay
|
||||||
|
targetBlocks := maxQueueDist |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
skip := targetBlocks / 2 |
||||||
|
|
||||||
|
tester := newTester(light) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
// Iteratively announce blocks, skipping one entry
|
||||||
|
imported := make(chan interface{}, len(hashes)-1) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if light { |
||||||
|
if header == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty header") |
||||||
|
} |
||||||
|
imported <- header |
||||||
|
} else { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
} |
||||||
|
for i := len(hashes) - 1; i >= 0; i-- { |
||||||
|
if i != skip { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
time.Sleep(time.Millisecond) |
||||||
|
} |
||||||
|
} |
||||||
|
// Finally announce the skipped entry and check full import
|
||||||
|
tester.fetcher.Notify("valid", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
verifyImportCount(t, imported, len(hashes)-1) |
||||||
|
verifyChainHeight(t, tester, uint64(len(hashes)-1)) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that direct block enqueues (due to block propagation vs. hash announce)
|
||||||
|
// are correctly scheduled, filling in import queue gaps.
|
||||||
|
func TestQueueGapFill(t *testing.T) { |
||||||
|
// Create a chain of blocks to import, and choose one to not announce at all
|
||||||
|
targetBlocks := maxQueueDist |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
skip := targetBlocks / 2 |
||||||
|
|
||||||
|
tester := newTester(false) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
// Iteratively announce blocks, skipping one entry
|
||||||
|
imported := make(chan interface{}, len(hashes)-1) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } |
||||||
|
|
||||||
|
for i := len(hashes) - 1; i >= 0; i-- { |
||||||
|
if i != skip { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
time.Sleep(time.Millisecond) |
||||||
|
} |
||||||
|
} |
||||||
|
// Fill the missing block directly as if propagated
|
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[skip]]) |
||||||
|
verifyImportCount(t, imported, len(hashes)-1) |
||||||
|
verifyChainHeight(t, tester, uint64(len(hashes)-1)) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that blocks arriving from various sources (multiple propagations, hash
|
||||||
|
// announces, etc) do not get scheduled for import multiple times.
|
||||||
|
func TestImportDeduplication(t *testing.T) { |
||||||
|
// Create two blocks to import (one for duplication, the other for stalling)
|
||||||
|
hashes, blocks := makeChain(2, 0, genesis) |
||||||
|
|
||||||
|
// Create the tester and wrap the importer with a counter
|
||||||
|
tester := newTester(false) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
counter := uint32(0) |
||||||
|
tester.fetcher.insertChain = func(blocks types.Blocks) (int, error) { |
||||||
|
atomic.AddUint32(&counter, uint32(len(blocks))) |
||||||
|
return tester.insertChain(blocks) |
||||||
|
} |
||||||
|
// Instrument the fetching and imported events
|
||||||
|
fetching := make(chan []common.Hash) |
||||||
|
imported := make(chan interface{}, len(hashes)-1) |
||||||
|
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } |
||||||
|
|
||||||
|
// Announce the duplicating block, wait for retrieval, and also propagate directly
|
||||||
|
tester.fetcher.Notify("valid", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
<-fetching |
||||||
|
|
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[0]]) |
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[0]]) |
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[0]]) |
||||||
|
|
||||||
|
// Fill the missing block directly as if propagated, and check import uniqueness
|
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[1]]) |
||||||
|
verifyImportCount(t, imported, 2) |
||||||
|
|
||||||
|
if counter != 2 { |
||||||
|
t.Fatalf("import invocation count mismatch: have %v, want %v", counter, 2) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that blocks with numbers much lower or higher than our current head get
|
||||||
|
// discarded to prevent wasting resources on useless blocks from faulty peers.
|
||||||
|
func TestDistantPropagationDiscarding(t *testing.T) { |
||||||
|
// Create a long chain to import and define the discard boundaries
|
||||||
|
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) |
||||||
|
head := hashes[len(hashes)/2] |
||||||
|
|
||||||
|
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 |
||||||
|
|
||||||
|
// Create a tester and simulate a head block being the middle of the above chain
|
||||||
|
tester := newTester(false) |
||||||
|
|
||||||
|
tester.lock.Lock() |
||||||
|
tester.hashes = []common.Hash{head} |
||||||
|
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} |
||||||
|
tester.lock.Unlock() |
||||||
|
|
||||||
|
// Ensure that a block with a lower number than the threshold is discarded
|
||||||
|
tester.fetcher.Enqueue("lower", blocks[hashes[low]]) |
||||||
|
time.Sleep(10 * time.Millisecond) |
||||||
|
if !tester.fetcher.queue.Empty() { |
||||||
|
t.Fatalf("fetcher queued stale block") |
||||||
|
} |
||||||
|
// Ensure that a block with a higher number than the threshold is discarded
|
||||||
|
tester.fetcher.Enqueue("higher", blocks[hashes[high]]) |
||||||
|
time.Sleep(10 * time.Millisecond) |
||||||
|
if !tester.fetcher.queue.Empty() { |
||||||
|
t.Fatalf("fetcher queued future block") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that announcements with numbers much lower or higher than our current
|
||||||
|
// head get discarded to prevent wasting resources on useless blocks from faulty
|
||||||
|
// peers.
|
||||||
|
func TestFullDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, false) } |
||||||
|
func TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) } |
||||||
|
|
||||||
|
func testDistantAnnouncementDiscarding(t *testing.T, light bool) { |
||||||
|
// Create a long chain to import and define the discard boundaries
|
||||||
|
hashes, blocks := makeChain(3*maxQueueDist, 0, genesis) |
||||||
|
head := hashes[len(hashes)/2] |
||||||
|
|
||||||
|
low, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1 |
||||||
|
|
||||||
|
// Create a tester and simulate a head block being the middle of the above chain
|
||||||
|
tester := newTester(light) |
||||||
|
|
||||||
|
tester.lock.Lock() |
||||||
|
tester.hashes = []common.Hash{head} |
||||||
|
tester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()} |
||||||
|
tester.blocks = map[common.Hash]*types.Block{head: blocks[head]} |
||||||
|
tester.lock.Unlock() |
||||||
|
|
||||||
|
headerFetcher := tester.makeHeaderFetcher("lower", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("lower", blocks, 0) |
||||||
|
|
||||||
|
fetching := make(chan struct{}, 2) |
||||||
|
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} } |
||||||
|
|
||||||
|
// Ensure that a block with a lower number than the threshold is discarded
|
||||||
|
tester.fetcher.Notify("lower", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
select { |
||||||
|
case <-time.After(50 * time.Millisecond): |
||||||
|
case <-fetching: |
||||||
|
t.Fatalf("fetcher requested stale header") |
||||||
|
} |
||||||
|
// Ensure that a block with a higher number than the threshold is discarded
|
||||||
|
tester.fetcher.Notify("higher", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
select { |
||||||
|
case <-time.After(50 * time.Millisecond): |
||||||
|
case <-fetching: |
||||||
|
t.Fatalf("fetcher requested future header") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that peers announcing blocks with invalid numbers (i.e. not matching
|
||||||
|
// the headers provided afterwards) get dropped as malicious.
|
||||||
|
func TestFullInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, false) } |
||||||
|
func TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) } |
||||||
|
|
||||||
|
func testInvalidNumberAnnouncement(t *testing.T, light bool) { |
||||||
|
// Create a single block to import and check numbers against
|
||||||
|
hashes, blocks := makeChain(1, 0, genesis) |
||||||
|
|
||||||
|
tester := newTester(light) |
||||||
|
badHeaderFetcher := tester.makeHeaderFetcher("bad", blocks, -gatherSlack) |
||||||
|
badBodyFetcher := tester.makeBodyFetcher("bad", blocks, 0) |
||||||
|
|
||||||
|
imported := make(chan interface{}) |
||||||
|
announced := make(chan interface{}) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if light { |
||||||
|
if header == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty header") |
||||||
|
} |
||||||
|
imported <- header |
||||||
|
} else { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
} |
||||||
|
// Announce a block with a bad number, check for immediate drop
|
||||||
|
tester.fetcher.announceChangeHook = func(hash common.Hash, b bool) { |
||||||
|
announced <- nil |
||||||
|
} |
||||||
|
tester.fetcher.Notify("bad", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher) |
||||||
|
verifyAnnounce := func() { |
||||||
|
for i := 0; i < 2; i++ { |
||||||
|
select { |
||||||
|
case <-announced: |
||||||
|
continue |
||||||
|
case <-time.After(1 * time.Second): |
||||||
|
t.Fatal("announce timeout") |
||||||
|
return |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
verifyAnnounce() |
||||||
|
verifyImportEvent(t, imported, false) |
||||||
|
tester.lock.RLock() |
||||||
|
dropped := tester.drops["bad"] |
||||||
|
tester.lock.RUnlock() |
||||||
|
|
||||||
|
if !dropped { |
||||||
|
t.Fatalf("peer with invalid numbered announcement not dropped") |
||||||
|
} |
||||||
|
goodHeaderFetcher := tester.makeHeaderFetcher("good", blocks, -gatherSlack) |
||||||
|
goodBodyFetcher := tester.makeBodyFetcher("good", blocks, 0) |
||||||
|
// Make sure a good announcement passes without a drop
|
||||||
|
tester.fetcher.Notify("good", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher) |
||||||
|
verifyAnnounce() |
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
|
||||||
|
tester.lock.RLock() |
||||||
|
dropped = tester.drops["good"] |
||||||
|
tester.lock.RUnlock() |
||||||
|
|
||||||
|
if dropped { |
||||||
|
t.Fatalf("peer with valid numbered announcement dropped") |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that if a block is empty (i.e. header only), no body request should be
|
||||||
|
// made, and instead the header should be assembled into a whole block by itself.
|
||||||
|
func TestEmptyBlockShortCircuit(t *testing.T) { |
||||||
|
// Create a chain of blocks to import
|
||||||
|
hashes, blocks := makeChain(32, 0, genesis) |
||||||
|
|
||||||
|
tester := newTester(false) |
||||||
|
headerFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
bodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
// Add a monitoring hook for all internal events
|
||||||
|
fetching := make(chan []common.Hash) |
||||||
|
tester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes } |
||||||
|
|
||||||
|
completing := make(chan []common.Hash) |
||||||
|
tester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes } |
||||||
|
|
||||||
|
imported := make(chan interface{}) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { |
||||||
|
if block == nil { |
||||||
|
t.Fatalf("Fetcher try to import empty block") |
||||||
|
} |
||||||
|
imported <- block |
||||||
|
} |
||||||
|
// Iteratively announce blocks until all are imported
|
||||||
|
for i := len(hashes) - 2; i >= 0; i-- { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher) |
||||||
|
|
||||||
|
// All announces should fetch the header
|
||||||
|
verifyFetchingEvent(t, fetching, true) |
||||||
|
|
||||||
|
// Only blocks with data contents should request bodies
|
||||||
|
verifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0) |
||||||
|
|
||||||
|
// Regardless of the construct, import should succeed
|
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that a peer is unable to use unbounded memory by sending infinite
|
||||||
|
// block announcements to a node, but that even in the face of such an attack,
|
||||||
|
// the fetcher remains operational.
|
||||||
|
func TestHashMemoryExhaustionAttack(t *testing.T) { |
||||||
|
// Create a tester with instrumented import hooks
|
||||||
|
tester := newTester(false) |
||||||
|
|
||||||
|
imported, announces := make(chan interface{}), int32(0) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } |
||||||
|
tester.fetcher.announceChangeHook = func(hash common.Hash, added bool) { |
||||||
|
if added { |
||||||
|
atomic.AddInt32(&announces, 1) |
||||||
|
} else { |
||||||
|
atomic.AddInt32(&announces, -1) |
||||||
|
} |
||||||
|
} |
||||||
|
// Create a valid chain and an infinite junk chain
|
||||||
|
targetBlocks := hashLimit + 2*maxQueueDist |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
validHeaderFetcher := tester.makeHeaderFetcher("valid", blocks, -gatherSlack) |
||||||
|
validBodyFetcher := tester.makeBodyFetcher("valid", blocks, 0) |
||||||
|
|
||||||
|
attack, _ := makeChain(targetBlocks, 0, unknownBlock) |
||||||
|
attackerHeaderFetcher := tester.makeHeaderFetcher("attacker", nil, -gatherSlack) |
||||||
|
attackerBodyFetcher := tester.makeBodyFetcher("attacker", nil, 0) |
||||||
|
|
||||||
|
// Feed the tester a huge hashset from the attacker, and a limited one from the valid peer
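// Announcements beyond the per-peer hashLimit should be dropped, which the count check below verifies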
|
||||||
|
for i := 0; i < len(attack); i++ { |
||||||
|
if i < maxQueueDist { |
||||||
|
tester.fetcher.Notify("valid", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher) |
||||||
|
} |
||||||
|
tester.fetcher.Notify("attacker", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher) |
||||||
|
} |
||||||
|
if count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist { |
||||||
|
t.Fatalf("queued announce count mismatch: have %d, want %d", count, hashLimit+maxQueueDist) |
||||||
|
} |
||||||
|
// Wait for fetches to complete
|
||||||
|
verifyImportCount(t, imported, maxQueueDist) |
||||||
|
|
||||||
|
// Feed the remaining valid hashes to ensure DOS protection state remains clean
|
||||||
|
for i := len(hashes) - maxQueueDist - 2; i >= 0; i-- { |
||||||
|
tester.fetcher.Notify("valid", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher) |
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
} |
||||||
|
|
||||||
|
// Tests that blocks sent to the fetcher (either through propagation or via hash
|
||||||
|
// announces and retrievals) don't pile up indefinitely, exhausting available
|
||||||
|
// system memory.
|
||||||
|
func TestBlockMemoryExhaustionAttack(t *testing.T) { |
||||||
|
// Create a tester with instrumented import hooks
|
||||||
|
tester := newTester(false) |
||||||
|
|
||||||
|
imported, enqueued := make(chan interface{}), int32(0) |
||||||
|
tester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block } |
||||||
|
tester.fetcher.queueChangeHook = func(hash common.Hash, added bool) { |
||||||
|
if added { |
||||||
|
atomic.AddInt32(&enqueued, 1) |
||||||
|
} else { |
||||||
|
atomic.AddInt32(&enqueued, -1) |
||||||
|
} |
||||||
|
} |
||||||
|
// Create a valid chain and a batch of dangling (but in range) blocks
|
||||||
|
targetBlocks := hashLimit + 2*maxQueueDist |
||||||
|
hashes, blocks := makeChain(targetBlocks, 0, genesis) |
||||||
|
attack := make(map[common.Hash]*types.Block) |
||||||
|
for i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ { |
||||||
|
hashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock) |
||||||
|
for _, hash := range hashes[:maxQueueDist-2] { |
||||||
|
attack[hash] = blocks[hash] |
||||||
|
} |
||||||
|
} |
||||||
|
// Try to feed all the attacker blocks and make sure only a limited batch is accepted
|
||||||
|
for _, block := range attack { |
||||||
|
tester.fetcher.Enqueue("attacker", block) |
||||||
|
} |
||||||
|
time.Sleep(200 * time.Millisecond) |
||||||
|
if queued := atomic.LoadInt32(&enqueued); queued != blockLimit { |
||||||
|
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit) |
||||||
|
} |
||||||
|
// Queue up a batch of valid blocks, and check that a new peer is allowed to do so
|
||||||
|
for i := 0; i < maxQueueDist-1; i++ { |
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-3-i]]) |
||||||
|
} |
||||||
|
time.Sleep(100 * time.Millisecond) |
||||||
|
if queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 { |
||||||
|
t.Fatalf("queued block count mismatch: have %d, want %d", queued, blockLimit+maxQueueDist-1) |
||||||
|
} |
||||||
|
// Insert the missing piece (and sanity check the import)
|
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2]]) |
||||||
|
verifyImportCount(t, imported, maxQueueDist) |
||||||
|
|
||||||
|
// Insert the remaining blocks in chunks to ensure clean DOS protection
|
||||||
|
for i := maxQueueDist; i < len(hashes)-1; i++ { |
||||||
|
tester.fetcher.Enqueue("valid", blocks[hashes[len(hashes)-2-i]]) |
||||||
|
verifyImportEvent(t, imported, true) |
||||||
|
} |
||||||
|
verifyImportDone(t, imported) |
||||||
|
} |