@@ -19,13 +19,14 @@ import (
 
 const (
 	maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk
-	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
 	peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
-	blockTtl = 15 * time.Second // The amount of time it takes for a block request to time out
+	blockTtl = 20 * time.Second // The amount of time it takes for a block request to time out
 	hashTtl = 20 * time.Second // The amount of time it takes for a hash request to time out
 )
 
 var (
+	minDesiredPeerCount = 5 // Amount of peers desired to start syncing
+
 	errLowTd = errors.New("peer's TD is too low")
 	errBusy = errors.New("busy")
 	errUnknownPeer = errors.New("peer's unknown or unhealthy")
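With minDesiredPeerCount moved from the const block into the var block, the threshold becomes assignable at runtime. A minimal sketch of what that permits, e.g. from a test in the same package (the test name and body are hypothetical, not part of this change):

```go
package downloader

import "testing"

// Hypothetical test-side override; only possible now that
// minDesiredPeerCount is a package-level var rather than a const.
func TestSyncWithSinglePeer(t *testing.T) {
	defer func(old int) { minDesiredPeerCount = old }(minDesiredPeerCount)
	minDesiredPeerCount = 1 // this assignment would not compile against a const

	// ... drive the downloader with a single peer here ...
}
```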
@@ -127,11 +128,11 @@ out:
 	for {
 		select {
 		case <-d.newPeerCh:
-			itimer.Stop()
 			// Meet the `minDesiredPeerCount` before we select our best peer
 			if len(d.peers) < minDesiredPeerCount {
 				break
 			}
+			itimer.Stop()
 
 			d.selectPeer(d.peers.bestPeer())
 		case <-itimer.C:
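The reordered case above stops the fallback timer only once minDesiredPeerCount peers are known; otherwise it keeps waiting and lets the timer fire. A standalone sketch of that select/timer shape, with illustrative names (waitForPeers, peerCh and want are not from the downloader):

```go
package main

import (
	"fmt"
	"time"
)

// waitForPeers blocks until `want` peers have announced themselves on
// peerCh, or until the timeout fires, whichever comes first. It mirrors
// the select/timer shape of the hunk above; all names are illustrative.
func waitForPeers(peerCh <-chan struct{}, want int, timeout time.Duration) int {
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	have := 0
	for {
		select {
		case <-peerCh:
			have++
			if have < want {
				break // not enough peers yet, keep the timer running
			}
			timer.Stop()
			return have
		case <-timer.C:
			return have // give up waiting and work with what we have
		}
	}
}

func main() {
	peerCh := make(chan struct{}, 3)
	for i := 0; i < 3; i++ {
		peerCh <- struct{}{}
	}
	fmt.Println("peers gathered:", waitForPeers(peerCh, 3, time.Second))
}
```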
@@ -154,7 +155,9 @@ func (d *Downloader) selectPeer(p *peer) {
 	// Make sure it's doing neither. Once done we can restart the
 	// downloading process if the TD is higher. For now just get on
 	// with whatever is going on. This prevents unecessary switching.
-	if !d.isBusy() {
+	if d.isBusy() {
+		return
+	}
 		// selected peer must be better than our own
 		// XXX we also check the peer's recent hash to make sure we
 		// don't have it. Some peers report (i think) incorrect TD.
@@ -164,7 +167,6 @@ func (d *Downloader) selectPeer(p *peer) {
 			glog.V(logger.Detail).Infoln("New peer with highest TD =", p.td)
 			d.syncCh <- syncPack{p, p.recentHash, false}
 		}
-	}
 }
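The two hunks above replace the `if !d.isBusy() { ... }` wrapper with an early return and drop the now-redundant closing brace, removing one nesting level without changing behaviour. A generic before/after sketch of the same guard-clause refactor (names and bodies are stand-ins, not the downloader's code):

```go
package main

import "fmt"

// Before: the useful work sits inside the negated condition.
func selectBusyWrapped(busy bool) string {
	if !busy {
		return "started sync" // stand-in for picking a peer and syncing
	}
	return "skipped"
}

// After: bail out early and keep the main path un-nested,
// matching the shape of the change above.
func selectEarlyReturn(busy bool) string {
	if busy {
		return "skipped"
	}
	return "started sync" // stand-in for picking a peer and syncing
}

func main() {
	fmt.Println(selectBusyWrapped(false), selectEarlyReturn(false)) // started sync started sync
	fmt.Println(selectBusyWrapped(true), selectEarlyReturn(true))   // skipped skipped
}
```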
@@ -282,6 +284,8 @@ out:
 			// If there are unrequested hashes left start fetching
 			// from the available peers.
 			if d.queue.hashPool.Size() > 0 {
+				was := d.queue.hashPool.Size()
+				fmt.Println("it was =", was)
 				availablePeers := d.peers.get(idleState)
 				for _, peer := range availablePeers {
 					// Get a possible chunk. If nil is returned no chunk
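The surrounding block hands unrequested hashes out to idle peers in chunks of at most maxBlockFetch; the new prints trace the pool size before that happens. A standalone sketch of the same dispatch shape (the worker, pending-item and chunk-size names here are stand-ins, not the downloader's types):

```go
package main

import "fmt"

const maxPerChunk = 4 // stand-in for maxBlockFetch

// dispatch hands out up to maxPerChunk pending items to each idle worker
// and returns what every worker was assigned.
func dispatch(pending []string, idleWorkers []string) map[string][]string {
	assigned := make(map[string][]string)
	for _, w := range idleWorkers {
		if len(pending) == 0 {
			break // nothing left to hand out
		}
		n := maxPerChunk
		if n > len(pending) {
			n = len(pending)
		}
		assigned[w] = pending[:n]
		pending = pending[n:]
	}
	return assigned
}

func main() {
	hashes := []string{"h1", "h2", "h3", "h4", "h5", "h6"}
	fmt.Println(dispatch(hashes, []string{"peerA", "peerB"}))
}
```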
@@ -301,13 +305,14 @@ out:
 						d.queue.put(chunk.hashes)
 					}
 				}
+				fmt.Println("it is =", d.queue.hashPool.Size())
 
 				// make sure that we have peers available for fetching. If all peers have been tried
 				// and all failed throw an error
 				if len(d.queue.fetching) == 0 {
 					d.queue.reset()
 
-					return fmt.Errorf("%v avaialable = %d. total = %d", errPeersUnavailable, len(availablePeers), len(d.peers))
+					return fmt.Errorf("%v peers available = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.hashPool.Size())
 				}
 			} else if len(d.queue.fetching) == 0 {
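The reworked return now reports the idle-peer count, the total peer count and the number of outstanding hashes alongside errPeersUnavailable. A minimal sketch of how such a diagnostic error is built and reads (the sentinel below is a local stand-in; its text is not taken from the downloader):

```go
package main

import (
	"errors"
	"fmt"
)

// errNoPeers stands in for the downloader's errPeersUnavailable sentinel.
var errNoPeers = errors.New("no peers available for fetching")

func main() {
	available, total, needed := 0, 5, 1234
	err := fmt.Errorf("%v peers available = %d. total peers = %d. hashes needed = %d",
		errNoPeers, available, total, needed)
	fmt.Println(err)
	// no peers available for fetching peers available = 0. total peers = 5. hashes needed = 1234
}
```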