@@ -430,7 +430,7 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
 	}
 	height := latest.Number.Uint64()
 
-	origin, err := d.findAncestor(p, height)
+	origin, err := d.findAncestor(p, latest)
 	if err != nil {
 		return err
 	}
@@ -587,41 +587,85 @@ func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
 	}
 }
 
+// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
+// common ancestor.
+// It returns parameters to be used for peer.RequestHeadersByNumber:
+//  from - starting block number
+//  count - number of headers to request
+//  skip - number of headers to skip
+// and also returns 'max', the last block which is expected to be returned by the remote peers,
+// given the (from,count,skip)
+func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
+	var (
+		from     int
+		count    int
+		MaxCount = MaxHeaderFetch / 16
+	)
+	// requestHead is the highest block that we will ask for. If requestHead is not offset,
+	// the highest block that we will get is 16 blocks back from head, which means we
+	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
+	// between us and the peer is 1-2 blocks, which is most common
+	requestHead := int(remoteHeight) - 1
+	if requestHead < 0 {
+		requestHead = 0
+	}
+	// requestBottom is the lowest block we want included in the query
+	// Ideally, we want to include just below own head
+	requestBottom := int(localHeight - 1)
+	if requestBottom < 0 {
+		requestBottom = 0
+	}
+	totalSpan := requestHead - requestBottom
+	span := 1 + totalSpan/MaxCount
+	if span < 2 {
+		span = 2
+	}
+	if span > 16 {
+		span = 16
+	}
+
+	count = 1 + totalSpan/span
+	if count > MaxCount {
+		count = MaxCount
+	}
+	if count < 2 {
+		count = 2
+	}
+	from = requestHead - (count-1)*span
+	if from < 0 {
+		from = 0
+	}
+	max := from + (count-1)*span
+	return int64(from), count, span - 1, uint64(max)
+}
+
 // findAncestor tries to locate the common ancestor link of the local chain and
 // a remote peers blockchain. In the general case when our node was in sync and
 // on the correct chain, checking the top N links should already get us a match.
 // In the rare scenario when we ended up on a long reorganisation (i.e. none of
 // the head links match), we do a binary search to find the common ancestor.
-func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
+func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
 	// Figure out the valid ancestor range to prevent rewrite attacks
-	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()
-
-	if d.mode == FullSync {
-		ceil = d.blockchain.CurrentBlock().NumberU64()
-	} else if d.mode == FastSync {
-		ceil = d.blockchain.CurrentFastBlock().NumberU64()
+	var (
+		floor        = int64(-1)
+		localHeight  uint64
+		remoteHeight = remoteHeader.Number.Uint64()
+	)
+	switch d.mode {
+	case FullSync:
+		localHeight = d.blockchain.CurrentBlock().NumberU64()
+	case FastSync:
+		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
+	default:
+		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
 	}
-	if ceil >= MaxForkAncestry {
-		floor = int64(ceil - MaxForkAncestry)
+	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
+	if localHeight >= MaxForkAncestry {
+		floor = int64(localHeight - MaxForkAncestry)
 	}
-	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
 
-	// Request the topmost blocks to short circuit binary ancestor lookup
-	head := ceil
-	if head > height {
-		head = height
-	}
-	from := int64(head) - int64(MaxHeaderFetch)
-	if from < 0 {
-		from = 0
-	}
-	// Span out with 15 block gaps into the future to catch bad head reports
-	limit := 2 * MaxHeaderFetch / 16
-	count := 1 + int((int64(ceil)-from)/16)
-	if count > limit {
-		count = limit
-	}
-	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)
+	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
+	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
 
 	// Wait for the remote response to the head fetch
 	number, hash := uint64(0), common.Hash{}
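
To make the new request shape concrete, the standalone sketch below (not part of the patch) replays the calculateRequestSpan arithmetic from the hunk above for two assumed scenarios: a node that is two blocks behind its peer, and one that is a million blocks behind. The helper name, the constant and the heights are illustrative; maxHeaderFetch mirrors the downloader's MaxHeaderFetch, assumed here to be 192, so maxCount works out to 12.

// Sketch only: replays the span arithmetic under assumed values.
package main

import "fmt"

const maxHeaderFetch = 192 // assumed stand-in for the downloader's MaxHeaderFetch

func requestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	var (
		from     int
		count    int
		maxCount = maxHeaderFetch / 16 // 12
	)
	requestHead := int(remoteHeight) - 1 // ask one below the reported remote head
	if requestHead < 0 {
		requestHead = 0
	}
	requestBottom := int(localHeight - 1) // include just below our own head
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/maxCount // gap between requested headers, clamped to [2, 16]
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}
	count = 1 + totalSpan/span // number of headers, clamped to [2, maxCount]
	if count > maxCount {
		count = maxCount
	}
	if count < 2 {
		count = 2
	}
	from = requestHead - (count-1)*span // walk back so the last header lands on requestHead
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span // highest block number expected back from the peer
	return int64(from), count, span - 1, uint64(max)
}

func main() {
	// Nearly in sync: local 1,000,000 vs remote 1,000,002.
	fmt.Println(requestSpan(1_000_002, 1_000_000)) // 999999 2 1 1000001
	// Far behind: local 4,000,000 vs remote 5,000,000.
	fmt.Println(requestSpan(5_000_000, 4_000_000)) // 4999823 12 15 4999999
}
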
@@ -647,9 +691,10 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
 				return 0, errEmptyHeaderSet
 			}
 			// Make sure the peer's reply conforms to the request
-			for i := 0; i < len(headers); i++ {
-				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
-					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
+			for i, header := range headers {
+				expectNumber := from + int64(i)*int64((skip+1))
+				if number := header.Number.Int64(); number != expectNumber {
+					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
 					return 0, errInvalidChain
 				}
 			}
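
The tightened conformance check follows directly from the request shape: for a request of (from, count, skip), the i-th header in the reply must carry number from + i*(skip+1). A minimal sketch (not part of the patch), using the far-behind scenario from the earlier sketch as assumed inputs:

// Sketch only: expected header numbers for an assumed request of
// (from=4999823, count=12, skip=15).
package main

import "fmt"

func main() {
	from, count, skip := int64(4_999_823), 12, 15
	for i := 0; i < count; i++ {
		expectNumber := from + int64(i)*int64(skip+1)
		fmt.Println(i, expectNumber) // 0 4999823, 1 4999839, ..., 11 4999999
	}
}
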
@@ -657,20 +702,15 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
 			finished = true
 			for i := len(headers) - 1; i >= 0; i-- {
 				// Skip any headers that underflow/overflow our requested set
-				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
+				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
 					continue
 				}
 				// Otherwise check if we already know the header or not
 				h := headers[i].Hash()
 				n := headers[i].Number.Uint64()
-				if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
+				if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) ||
+					(d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
 					number, hash = n, h
-
-					// If every header is known, even future ones, the peer straight out lied about its head
-					if number > height && i == limit-1 {
-						p.log.Warn("Lied about chain head", "reported", height, "found", number)
-						return 0, errStallingPeer
-					}
 					break
 				}
 			}
@@ -694,7 +734,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
 		return number, nil
 	}
 	// Ancestor not found, we need to binary search over our chain
-	start, end := uint64(0), head
+	start, end := uint64(0), remoteHeight
 	if floor > 0 {
 		start = uint64(floor)
 	}
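
When none of the spanned headers is already known locally, findAncestor falls back to a binary search, and after this change the upper bound of that search is the peer's reported height rather than the old head value. A rough standalone sketch (not part of the patch) of the resulting search window, with assumed heights and a stand-in constant for the real MaxForkAncestry limit:

// Sketch only: the [start, end] window the binary-search fallback scans,
// under assumed values (local head 4,000,000, remote head 5,000,000).
package main

import "fmt"

const maxForkAncestry = 90_000 // illustrative stand-in for MaxForkAncestry

func main() {
	localHeight, remoteHeight := uint64(4_000_000), uint64(5_000_000)

	floor := int64(-1)
	if localHeight >= maxForkAncestry {
		floor = int64(localHeight - maxForkAncestry) // deepest ancestor we will accept
	}
	start, end := uint64(0), remoteHeight // end is now the peer's reported height
	if floor > 0 {
		start = uint64(floor)
	}
	fmt.Println(start, end) // 3910000 5000000
}
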