eth/downloader: prioritize block fetch based on chain position, cap memory use

pull/863/head
Péter Szilágyi 10 years ago
parent 97c37356fd
commit 4800c94392
  1. 4
      Godeps/Godeps.json
  2. 44
      Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go
  3. 75
      Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go
  4. 110
      Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go
  5. 103
      Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go
  6. 93
      Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go
  7. 190
      eth/downloader/downloader.go
  8. 4
      eth/downloader/downloader_test.go
  9. 15
      eth/downloader/peer.go
  10. 416
      eth/downloader/queue.go
  11. 17
      eth/downloader/queue_test.go

4
Godeps/Godeps.json generated vendored

@ -98,6 +98,10 @@
"Comment": "v0.1.0-3-g27c4092", "Comment": "v0.1.0-3-g27c4092",
"Rev": "27c40922c40b43fe04554d8223a402af3ea333f3" "Rev": "27c40922c40b43fe04554d8223a402af3ea333f3"
}, },
{
"ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
"Rev": "cf5d8079df7c4501217638e1e3a6e43f94822548"
},
{ {
"ImportPath": "gopkg.in/qml.v1/cdata", "ImportPath": "gopkg.in/qml.v1/cdata",
"Rev": "1116cb9cd8dee23f8d444ded354eb53122739f99" "Rev": "1116cb9cd8dee23f8d444ded354eb53122739f99"

@ -0,0 +1,44 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package prque_test
import (
"fmt"
"gopkg.in/karalabe/cookiejar.v2/collections/prque"
)
// Insert some data into a priority queue and pop them out in prioritized order.
func Example_usage() {
	// Sample priorities and their associated payloads to queue up
	prio := []float32{77.7, 22.2, 44.4, 55.5, 11.1, 88.8, 33.3, 99.9, 0.0, 66.6}
	data := []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}

	// Build the queue and insert every (value, priority) pair
	pq := prque.New()
	for i, value := range data {
		pq.Push(value, prio[i])
	}
	// Drain the queue, printing the entries in descending priority order
	for !pq.Empty() {
		val, prio := pq.Pop()
		fmt.Printf("%.1f:%s ", prio, val)
	}
	// Output:
	// 99.9:seven 88.8:five 77.7:zero 66.6:nine 55.5:three 44.4:two 33.3:six 22.2:one 11.1:four 0.0:eight
}

@ -0,0 +1,75 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
// Package prque implements a priority queue data structure supporting arbitrary
// value types and float priorities.
//
// The reasoning behind using floats for the priorities vs. ints or interfaces
// was larger flexibility without sacrificing too much performance or code
// complexity.
//
// If you would like to use a min-priority queue, simply negate the priorities.
//
// Internally the queue is based on the standard heap package working on a
// sortable version of the block based stack.
package prque
import (
"container/heap"
)
// Prque is a max-priority queue: Pop returns the highest-priority element
// first. It is a thin wrapper over a heap-ordered block-based stack.
type Prque struct {
	cont *sstack // underlying heap-ordered storage
}
// New creates an empty priority queue ready for use.
func New() *Prque {
	queue := new(Prque)
	queue.cont = newSstack()
	return queue
}
// Push inserts a value with the given priority into the queue, expanding
// the underlying storage if necessary.
func (p *Prque) Push(data interface{}, priority float32) {
	entry := &item{value: data, priority: priority}
	heap.Push(p.cont, entry)
}
// Pops the value with the greatest priority off the queue and returns it
// together with its priority. Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, float32) {
	item := heap.Pop(p.cont).(*item)
	return item.value, item.priority
}
// PopItem removes the highest-priority entry and returns only its value,
// discarding the associated priority.
func (p *Prque) PopItem() interface{} {
	popped := heap.Pop(p.cont).(*item)
	return popped.value
}
// Empty reports whether the priority queue holds no elements.
func (p *Prque) Empty() bool {
	return p.Size() == 0
}
// Returns the number of elements in the priority queue.
func (p *Prque) Size() int {
	return p.cont.Len()
}
// Clears the contents of the priority queue by resetting the backing stack.
func (p *Prque) Reset() {
	p.cont.Reset()
}

@ -0,0 +1,110 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package prque
import (
"math/rand"
"testing"
)
// Tests that values pushed with random distinct priorities pop back out in
// strictly non-increasing priority order, across two fill/drain cycles.
func TestPrque(t *testing.T) {
	// Generate a batch of random data and a specific priority order
	size := 16 * blockSize
	prio := rand.Perm(size)
	data := make([]int, size)
	for i := 0; i < size; i++ {
		data[i] = rand.Int()
	}
	queue := New()
	// The second repetition exercises reuse of the drained (but allocated) queue
	for rep := 0; rep < 2; rep++ {
		// Fill a priority queue with the above data
		for i := 0; i < size; i++ {
			queue.Push(data[i], float32(prio[i]))
			if queue.Size() != i+1 {
				t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1)
			}
		}
		// Create a map from priorities to values for easier verification
		dict := make(map[float32]int)
		for i := 0; i < size; i++ {
			dict[float32(prio[i])] = data[i]
		}
		// Pop out the elements in priority order and verify them
		prevPrio := float32(size + 1)
		for !queue.Empty() {
			val, prio := queue.Pop()
			if prio > prevPrio {
				t.Errorf("invalid priority order: %v after %v.", prio, prevPrio)
			}
			prevPrio = prio
			if val != dict[prio] {
				t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio])
			}
			delete(dict, prio)
		}
	}
}
// Tests that resetting a populated queue leaves it empty.
func TestReset(t *testing.T) {
	// Load the queue with a batch of random entries
	size := 16 * blockSize
	queue := New()
	for left := size; left > 0; left-- {
		queue.Push(rand.Int(), rand.Float32())
	}
	// Wipe the queue and check that nothing remains
	queue.Reset()
	if !queue.Empty() {
		t.Errorf("priority queue not empty after reset: %v", queue)
	}
}
// Benchmarks the cost of pushing b.N prioritized values into a fresh queue.
func BenchmarkPush(b *testing.B) {
	// Pre-generate the inputs outside the timed section
	data := make([]int, b.N)
	prio := make([]float32, b.N)
	for i := range data {
		data[i] = rand.Int()
		prio[i] = rand.Float32()
	}
	// Time only the insertions
	b.ResetTimer()
	queue := New()
	for i := range data {
		queue.Push(data[i], prio[i])
	}
}
// Benchmarks draining a queue pre-filled with b.N prioritized values.
func BenchmarkPop(b *testing.B) {
	// Pre-generate the inputs and fill the queue outside the timed section
	data := make([]int, b.N)
	prio := make([]float32, b.N)
	for i := range data {
		data[i] = rand.Int()
		prio[i] = rand.Float32()
	}
	queue := New()
	for i := range data {
		queue.Push(data[i], prio[i])
	}
	// Time only the removals
	b.ResetTimer()
	for !queue.Empty() {
		queue.Pop()
	}
}

@ -0,0 +1,103 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package prque
// The number of items held in a single allocation block of the stack.
const blockSize = 4096

// A prioritized item in the sorted stack.
type item struct {
	value    interface{} // opaque user payload
	priority float32     // sort key; higher values pop first
}
// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heaps. Storage is a list of fixed-size
// blocks, so growing never copies already stored elements.
type sstack struct {
	size     int       // number of items currently held
	capacity int       // total slots across all allocated blocks
	offset   int       // next free index within the active block
	blocks   [][]*item // all allocated storage blocks, in order
	active   []*item   // block currently being pushed into / popped from
}
// Creates a new, empty stack with a single pre-allocated storage block.
func newSstack() *sstack {
	first := make([]*item, blockSize)
	return &sstack{
		active:   first,
		blocks:   [][]*item{first},
		capacity: blockSize,
	}
}
// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
	if s.size == s.capacity {
		// Every block is full: allocate a fresh block and make it active
		s.active = make([]*item, blockSize)
		s.blocks = append(s.blocks, s.active)
		s.capacity += blockSize
		s.offset = 0
	} else if s.offset == blockSize {
		// Active block is full but spare capacity remains: advance to the
		// block that position size/blockSize maps to
		s.active = s.blocks[s.size/blockSize]
		s.offset = 0
	}
	s.active[s.offset] = data.(*item)
	s.offset++
	s.size++
}
// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
	s.size--
	s.offset--
	if s.offset < 0 {
		// Stepped below the active block: move back to the previous one
		s.offset = blockSize - 1
		s.active = s.blocks[s.size/blockSize]
	}
	// Clear the vacated slot so the popped item can be garbage collected
	res, s.active[s.offset] = s.active[s.offset], nil
	return
}
// Returns the length of the stack (the number of stored items). Required by
// sort.Interface.
func (s *sstack) Len() int {
	return s.size
}
// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
	first := s.blocks[i/blockSize][i%blockSize]
	second := s.blocks[j/blockSize][j%blockSize]
	return first.priority > second.priority
}
// Swaps two elements in the stack, addressed by their flat indices.
// Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
	ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
	s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io]
}
// Resets the stack, effectively clearing its contents. Only the first storage
// block is retained: the capacity is reset to blockSize, so keeping stale
// extra blocks around would make a later growth (size == capacity in Push)
// append a brand-new block *after* the stale ones, breaking the
// size/blockSize block-index math shared by Push, Pop, Less and Swap.
func (s *sstack) Reset() {
	// Drop references to the retired blocks so their items can be collected.
	s.blocks = s.blocks[:1]
	s.active = s.blocks[0]
	s.offset = 0
	s.size = 0
	s.capacity = blockSize
}

@ -0,0 +1,93 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free Software
// Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// The toolbox is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
// more details.
//
// Alternatively, the CookieJar toolbox may be used in accordance with the terms
// and conditions contained in a signed written agreement between you and the
// author(s).
package prque
import (
"math/rand"
"sort"
"testing"
)
// Tests basic stack push/pop behavior: every second pushed item is popped
// back immediately, the remainder is drained afterwards in LIFO order.
func TestSstack(t *testing.T) {
	// Create some initial data
	size := 16 * blockSize
	data := make([]*item, size)
	for i := 0; i < size; i++ {
		data[i] = &item{rand.Int(), rand.Float32()}
	}
	stack := newSstack()
	// The second repetition exercises reuse of the emptied (but allocated) stack
	for rep := 0; rep < 2; rep++ {
		// Push all the data into the stack, pop out every second
		secs := []*item{}
		for i := 0; i < size; i++ {
			stack.Push(data[i])
			if i%2 == 0 {
				secs = append(secs, stack.Pop().(*item))
			}
		}
		rest := []*item{}
		for stack.Len() > 0 {
			rest = append(rest, stack.Pop().(*item))
		}
		// Make sure the contents of the resulting slices are ok
		for i := 0; i < size; i++ {
			// Even-indexed items were popped right after their push
			if i%2 == 0 && data[i] != secs[i/2] {
				t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i])
			}
			// Odd-indexed items drain back in reverse (LIFO) order
			if i%2 == 1 && data[i] != rest[len(rest)-i/2-1] {
				t.Errorf("push/pop mismatch: have %v, want %v.", rest[len(rest)-i/2-1], data[i])
			}
		}
	}
}
// Tests that sorting the stack orders its contents by descending priority.
func TestSstackSort(t *testing.T) {
	// Generate items with strictly increasing priorities
	size := 16 * blockSize
	data := make([]*item, size)
	for i := range data {
		data[i] = &item{rand.Int(), float32(i)}
	}
	// Load everything onto a fresh stack
	stack := newSstack()
	for _, val := range data {
		stack.Push(val)
	}
	// Sorting reverses the stored order (highest priority first), so popping
	// from the top should return the items in their original insertion order
	sort.Sort(stack)
	for _, val := range data {
		out := stack.Pop()
		if out != val {
			t.Errorf("push/pop mismatch after sort: have %v, want %v.", out, val)
		}
	}
}
// Tests that resetting a filled stack reports it as empty.
func TestSstackReset(t *testing.T) {
	// Push several blocks worth of items onto the stack
	size := 16 * blockSize
	stack := newSstack()
	for n := 0; n < size; n++ {
		stack.Push(&item{value: n, priority: float32(n)})
	}
	// Reset and verify the stack no longer holds anything
	stack.Reset()
	if stack.Len() != 0 {
		t.Errorf("stack not empty after reset: %v", stack)
	}
}

@ -11,11 +11,10 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger"
"github.com/ethereum/go-ethereum/logger/glog" "github.com/ethereum/go-ethereum/logger/glog"
"gopkg.in/fatih/set.v0"
) )
const ( const (
maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk maxBlockFetch = 128 // Amount of max blocks to be fetched per chunk
peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount
hashTtl = 20 * time.Second // The amount of time it takes for a hash request to time out hashTtl = 20 * time.Second // The amount of time it takes for a hash request to time out
) )
@ -80,7 +79,7 @@ type Downloader struct {
func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader { func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
downloader := &Downloader{ downloader := &Downloader{
queue: newqueue(), queue: newQueue(),
peers: make(peers), peers: make(peers),
hasBlock: hasBlock, hasBlock: hasBlock,
getBlock: getBlock, getBlock: getBlock,
@ -93,7 +92,7 @@ func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader {
} }
func (d *Downloader) Stats() (current int, max int) { func (d *Downloader) Stats() (current int, max int) {
return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size() return d.queue.Size()
} }
func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error { func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
@ -111,7 +110,7 @@ func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFet
return nil return nil
} }
// UnregisterPeer unregister's a peer. This will prevent any action from the specified peer. // UnregisterPeer unregisters a peer. This will prevent any action from the specified peer.
func (d *Downloader) UnregisterPeer(id string) { func (d *Downloader) UnregisterPeer(id string) {
d.mu.Lock() d.mu.Lock()
defer d.mu.Unlock() defer d.mu.Unlock()
@ -121,20 +120,20 @@ func (d *Downloader) UnregisterPeer(id string) {
delete(d.peers, id) delete(d.peers, id)
} }
// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given // SynchroniseWithPeer will select the peer and use it for synchronizing. If an empty string is given
// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous // checks fail an error will be returned. This method is synchronous
func (d *Downloader) Synchronise(id string, hash common.Hash) error { func (d *Downloader) Synchronise(id string, hash common.Hash) error {
// Make sure it's doing neither. Once done we can restart the // Make sure it's doing neither. Once done we can restart the
// downloading process if the TD is higher. For now just get on // downloading process if the TD is higher. For now just get on
// with whatever is going on. This prevents unecessary switching. // with whatever is going on. This prevents unnecessary switching.
if d.isBusy() { if d.isBusy() {
return errBusy return errBusy
} }
// When a synchronisation attempt is made while the queue stil // When a synchronization attempt is made while the queue still
// contains items we abort the sync attempt // contains items we abort the sync attempt
if d.queue.size() > 0 { if done, pend := d.queue.Size(); done+pend > 0 {
return errPendingQueue return errPendingQueue
} }
@ -157,56 +156,23 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error {
// are processed. If the block count reaches zero and done is called // are processed. If the block count reaches zero and done is called
// we reset the queue for the next batch of incoming hashes and blocks. // we reset the queue for the next batch of incoming hashes and blocks.
func (d *Downloader) Done() { func (d *Downloader) Done() {
d.queue.mu.Lock() d.queue.Done()
defer d.queue.mu.Unlock()
if len(d.queue.blocks) == 0 {
d.queue.resetNoTS()
}
} }
// TakeBlocks takes blocks from the queue and yields them to the blockTaker handler // TakeBlocks takes blocks from the queue and yields them to the blockTaker handler
// it's possible it yields no blocks // it's possible it yields no blocks
func (d *Downloader) TakeBlocks() types.Blocks { func (d *Downloader) TakeBlocks() types.Blocks {
d.queue.mu.Lock() // Check that there are blocks available and its parents are known
defer d.queue.mu.Unlock() head := d.queue.GetHeadBlock()
if head == nil || !d.hasBlock(head.ParentHash()) {
var blocks types.Blocks return nil
if len(d.queue.blocks) > 0 {
// Make sure the parent hash is known
if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) {
return nil
}
for _, block := range d.queue.blocks {
if block == nil {
break
}
blocks = append(blocks, block)
}
d.queue.blockOffset += len(blocks)
// delete the blocks from the slice and let them be garbage collected
// without this slice trick the blocks would stay in memory until nil
// would be assigned to d.queue.blocks
copy(d.queue.blocks, d.queue.blocks[len(blocks):])
for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ {
d.queue.blocks[k] = nil
}
d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)]
//d.queue.blocks = d.queue.blocks[len(blocks):]
if len(d.queue.blocks) == 0 {
d.queue.blocks = nil
}
} }
// Retrieve a full batch of blocks
return blocks return d.queue.TakeBlocks(head)
} }
func (d *Downloader) Has(hash common.Hash) bool { func (d *Downloader) Has(hash common.Hash) bool {
return d.queue.has(hash) return d.queue.Has(hash)
} }
func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) { func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) {
@ -214,7 +180,7 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool)
defer func() { defer func() {
// reset on error // reset on error
if err != nil { if err != nil {
d.queue.reset() d.queue.Reset()
} }
}() }()
@ -244,7 +210,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b
atomic.StoreInt32(&d.fetchingHashes, 1) atomic.StoreInt32(&d.fetchingHashes, 1)
defer atomic.StoreInt32(&d.fetchingHashes, 0) defer atomic.StoreInt32(&d.fetchingHashes, 0)
if d.queue.has(h) { if d.queue.Has(h) { // TODO: Is this possible? Shouldn't queue be empty for startFetchingHashes to be even called?
return errAlreadyInPool return errAlreadyInPool
} }
@ -256,7 +222,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b
// In such circumstances we don't need to download the block so don't add it to the queue. // In such circumstances we don't need to download the block so don't add it to the queue.
if !ignoreInitial { if !ignoreInitial {
// Add the hash to the queue first // Add the hash to the queue first
d.queue.hashPool.Add(h) d.queue.Insert([]common.Hash{h})
} }
// Get the first batch of hashes // Get the first batch of hashes
p.getHashes(h) p.getHashes(h)
@ -273,7 +239,7 @@ out:
for { for {
select { select {
case hashPack := <-d.hashCh: case hashPack := <-d.hashCh:
// make sure the active peer is giving us the hashes // Make sure the active peer is giving us the hashes
if hashPack.peerId != activePeer.id { if hashPack.peerId != activePeer.id {
glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId) glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId)
break break
@ -281,43 +247,37 @@ out:
failureResponseTimer.Reset(hashTtl) failureResponseTimer.Reset(hashTtl)
var ( // Make sure the peer actually gave something valid
hashes = hashPack.hashes if len(hashPack.hashes) == 0 {
done bool // determines whether we're done fetching hashes (i.e. common hash found) glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
) d.queue.Reset()
hashSet := set.New()
for _, hash = range hashes {
if d.hasBlock(hash) || d.queue.blockHashes.Has(hash) {
glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
return errEmptyHashSet
}
// Determine if we're done fetching hashes (queue up all pending), and continue if not done
done, index := false, 0
for index, hash = range hashPack.hashes {
if d.hasBlock(hash) || d.queue.GetBlock(hash) != nil {
glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4])
hashPack.hashes = hashPack.hashes[:index]
done = true done = true
break break
} }
hashSet.Add(hash)
} }
d.queue.put(hashSet) d.queue.Insert(hashPack.hashes)
// Add hashes to the chunk set
if len(hashes) == 0 { // Make sure the peer actually gave you something valid
glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id)
d.queue.reset()
return errEmptyHashSet if !done {
} else if !done { // Check if we're done fetching
// Get the next set of hashes
activePeer.getHashes(hash) activePeer.getHashes(hash)
} else { // we're done continue
// The offset of the queue is determined by the highest known block
var offset int
if block := d.getBlock(hash); block != nil {
offset = int(block.NumberU64() + 1)
}
// allocate proper size for the queueue
d.queue.alloc(offset, d.queue.hashPool.Size())
break out
} }
// We're done, allocate the download cache and proceed pulling the blocks
offset := 0
if block := d.getBlock(hash); block != nil {
offset = int(block.NumberU64() + 1)
}
d.queue.Alloc(offset)
break out
case <-failureResponseTimer.C: case <-failureResponseTimer.C:
glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id) glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id)
@ -326,7 +286,7 @@ out:
// already fetched hash list. This can't guarantee 100% correctness but does // already fetched hash list. This can't guarantee 100% correctness but does
// a fair job. This is always either correct or false incorrect. // a fair job. This is always either correct or false incorrect.
for id, peer := range d.peers { for id, peer := range d.peers {
if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] { if d.queue.Has(peer.recentHash) && !attemptedPeers[id] {
p = peer p = peer
break break
} }
@ -335,7 +295,7 @@ out:
// if all peers have been tried, abort the process entirely or if the hash is // if all peers have been tried, abort the process entirely or if the hash is
// the zero hash. // the zero hash.
if p == nil || (hash == common.Hash{}) { if p == nil || (hash == common.Hash{}) {
d.queue.reset() d.queue.Reset()
return errTimeout return errTimeout
} }
@ -346,13 +306,14 @@ out:
glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id) glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id)
} }
} }
glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start)) glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start))
return nil return nil
} }
func (d *Downloader) startFetchingBlocks(p *peer) error { func (d *Downloader) startFetchingBlocks(p *peer) error {
glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "block(s)") glog.V(logger.Detail).Infoln("Downloading", d.queue.Pending(), "block(s)")
atomic.StoreInt32(&d.downloadingBlocks, 1) atomic.StoreInt32(&d.downloadingBlocks, 1)
defer atomic.StoreInt32(&d.downloadingBlocks, 0) defer atomic.StoreInt32(&d.downloadingBlocks, 0)
// Defer the peer reset. This will empty the peer requested set // Defer the peer reset. This will empty the peer requested set
@ -362,7 +323,7 @@ func (d *Downloader) startFetchingBlocks(p *peer) error {
start := time.Now() start := time.Now()
// default ticker for re-fetching blocks everynow and then // default ticker for re-fetching blocks every now and then
ticker := time.NewTicker(20 * time.Millisecond) ticker := time.NewTicker(20 * time.Millisecond)
out: out:
for { for {
@ -371,7 +332,7 @@ out:
// If the peer was previously banned and failed to deliver it's pack // If the peer was previously banned and failed to deliver it's pack
// in a reasonable time frame, ignore it's message. // in a reasonable time frame, ignore it's message.
if d.peers[blockPack.peerId] != nil { if d.peers[blockPack.peerId] != nil {
err := d.queue.deliver(blockPack.peerId, blockPack.blocks) err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
if err != nil { if err != nil {
glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err) glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err)
// FIXME d.UnregisterPeer(blockPack.peerId) // FIXME d.UnregisterPeer(blockPack.peerId)
@ -385,46 +346,49 @@ out:
d.peers.setState(blockPack.peerId, idleState) d.peers.setState(blockPack.peerId, idleState)
} }
case <-ticker.C: case <-ticker.C:
// after removing bad peers make sure we actually have suffucient peer left to keep downlading // after removing bad peers make sure we actually have sufficient peer left to keep downloading
if len(d.peers) == 0 { if len(d.peers) == 0 {
d.queue.reset() d.queue.Reset()
return errNoPeers return errNoPeers
} }
// If there are unrequested hashes left start fetching // If there are unrequested hashes left start fetching
// from the available peers. // from the available peers.
if d.queue.hashPool.Size() > 0 { if d.queue.Pending() > 0 {
// Throttle the download if block cache is full and waiting processing
if d.queue.Throttle() {
continue
}
availablePeers := d.peers.get(idleState) availablePeers := d.peers.get(idleState)
for _, peer := range availablePeers { for _, peer := range availablePeers {
// Get a possible chunk. If nil is returned no chunk // Get a possible chunk. If nil is returned no chunk
// could be returned due to no hashes available. // could be returned due to no hashes available.
chunk := d.queue.get(peer, maxBlockFetch) request := d.queue.Reserve(peer, maxBlockFetch)
if chunk == nil { if request == nil {
continue continue
} }
// XXX make fetch blocking. // XXX make fetch blocking.
// Fetch the chunk and check for error. If the peer was somehow // Fetch the chunk and check for error. If the peer was somehow
// already fetching a chunk due to a bug, it will be returned to // already fetching a chunk due to a bug, it will be returned to
// the queue // the queue
if err := peer.fetch(chunk); err != nil { if err := peer.fetch(request); err != nil {
// log for tracing // log for tracing
glog.V(logger.Debug).Infof("peer %s received double work (state = %v)\n", peer.id, peer.state) glog.V(logger.Debug).Infof("peer %s received double work (state = %v)\n", peer.id, peer.state)
d.queue.put(chunk.hashes) d.queue.Cancel(request)
} }
} }
// make sure that we have peers available for fetching. If all peers have been tried // make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error // and all failed throw an error
if len(d.queue.fetching) == 0 { if d.queue.InFlight() == 0 {
d.queue.reset() d.queue.Reset()
return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.hashPool.Size()) return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.Pending())
} }
} else if len(d.queue.fetching) == 0 { } else if d.queue.InFlight() == 0 {
// When there are no more queue and no more `fetching`. We can // When there are no more queue and no more in flight, We can
// safely assume we're done. Another part of the process will check // safely assume we're done. Another part of the process will check
// for parent errors and will re-request anything that's missing // for parent errors and will re-request anything that's missing
break out break out
@ -434,27 +398,13 @@ out:
// that badly or poorly behave are removed from the peer set (not banned). // that badly or poorly behave are removed from the peer set (not banned).
// Bad peers are excluded from the available peer set and therefor won't be // Bad peers are excluded from the available peer set and therefor won't be
// reused. XXX We could re-introduce peers after X time. // reused. XXX We could re-introduce peers after X time.
d.queue.mu.Lock() badPeers := d.queue.Expire(blockTtl)
var badPeers []string
for pid, chunk := range d.queue.fetching {
if time.Since(chunk.itime) > blockTtl {
badPeers = append(badPeers, pid)
// remove peer as good peer from peer list
// FIXME d.UnregisterPeer(pid)
}
}
d.queue.mu.Unlock()
for _, pid := range badPeers { for _, pid := range badPeers {
// A nil chunk is delivered so that the chunk's hashes are given
// back to the queue objects. When hashes are put back in the queue
// other (decent) peers can pick them up.
// XXX We could make use of a reputation system here ranking peers // XXX We could make use of a reputation system here ranking peers
// in their performance // in their performance
// 1) Time for them to respond; // 1) Time for them to respond;
// 2) Measure their speed; // 2) Measure their speed;
// 3) Amount and availability. // 3) Amount and availability.
d.queue.deliver(pid, nil)
if peer := d.peers[pid]; peer != nil { if peer := d.peers[pid]; peer != nil {
peer.demote() peer.demote()
peer.reset() peer.reset()
@ -486,7 +436,7 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error {
if glog.V(logger.Detail) && len(hashes) != 0 { if glog.V(logger.Detail) && len(hashes) != 0 {
from, to := hashes[0], hashes[len(hashes)-1] from, to := hashes[0], hashes[len(hashes)-1]
glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id) glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id)
} }
d.hashCh <- hashPack{id, hashes} d.hashCh <- hashPack{id, hashes}

@ -128,7 +128,7 @@ func TestDownload(t *testing.T) {
t.Error("download error", err) t.Error("download error", err)
} }
inqueue := len(tester.downloader.queue.blocks) inqueue := len(tester.downloader.queue.blockCache)
if inqueue != targetBlocks { if inqueue != targetBlocks {
t.Error("expected", targetBlocks, "have", inqueue) t.Error("expected", targetBlocks, "have", inqueue)
} }
@ -151,7 +151,7 @@ func TestMissing(t *testing.T) {
t.Error("download error", err) t.Error("download error", err)
} }
inqueue := len(tester.downloader.queue.blocks) inqueue := len(tester.downloader.queue.blockCache)
if inqueue != targetBlocks { if inqueue != targetBlocks {
t.Error("expected", targetBlocks, "have", inqueue) t.Error("expected", targetBlocks, "have", inqueue)
} }

@ -78,7 +78,7 @@ func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blo
} }
// fetch a chunk using the peer // fetch a chunk using the peer
func (p *peer) fetch(chunk *chunk) error { func (p *peer) fetch(request *fetchRequest) error {
p.mu.Lock() p.mu.Lock()
defer p.mu.Unlock() defer p.mu.Unlock()
@ -88,13 +88,12 @@ func (p *peer) fetch(chunk *chunk) error {
// set working state // set working state
p.state = workingState p.state = workingState
// convert the set to a fetchable slice
hashes, i := make([]common.Hash, chunk.hashes.Size()), 0 // Convert the hash set to a fetchable slice
chunk.hashes.Each(func(v interface{}) bool { hashes := make([]common.Hash, 0, len(request.Hashes))
hashes[i] = v.(common.Hash) for hash, _ := range request.Hashes {
i++ hashes = append(hashes, hash)
return true }
})
p.getBlocks(hashes) p.getBlocks(hashes)
return nil return nil

@ -1,201 +1,349 @@
package downloader package downloader
import ( import (
"errors"
"fmt" "fmt"
"math"
"sync" "sync"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"gopkg.in/fatih/set.v0" "gopkg.in/karalabe/cookiejar.v2/collections/prque"
) )
const (
blockCacheLimit = 4096 // Maximum number of blocks to cache before throttling the download
)
// fetchRequest is a currently running block retrieval operation.
type fetchRequest struct {
Peer *peer // Peer to which the request was sent
Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
Time time.Time // Time when the request was made
}
// queue represents hashes that are either need fetching or are being fetched // queue represents hashes that are either need fetching or are being fetched
type queue struct { type queue struct {
hashPool *set.Set hashPool map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
fetchPool *set.Set hashQueue *prque.Prque // Priority queue of the block hashes to fetch
blockHashes *set.Set hashCounter int // Counter indexing the added hashes to ensure retrieval order
pendPool map[string]*fetchRequest // Currently pending block retrieval operations
pendCount int // Number of pending block fetches (to throttle the download)
mu sync.Mutex blockPool map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes
fetching map[string]*chunk blockCache []*types.Block // Downloaded but not yet delivered blocks
blockOffset int // Offset of the first cached block in the block-chain
blockOffset int lock sync.RWMutex
blocks []*types.Block
} }
func newqueue() *queue { // newQueue creates a new download queue for scheduling block retrieval.
func newQueue() *queue {
return &queue{ return &queue{
hashPool: set.New(), hashPool: make(map[common.Hash]int),
fetchPool: set.New(), hashQueue: prque.New(),
blockHashes: set.New(), pendPool: make(map[string]*fetchRequest),
fetching: make(map[string]*chunk), blockPool: make(map[common.Hash]int),
} }
} }
func (c *queue) reset() { // Reset clears out the queue contents.
c.mu.Lock() func (q *queue) Reset() {
defer c.mu.Unlock() q.lock.Lock()
defer q.lock.Unlock()
c.resetNoTS() q.hashPool = make(map[common.Hash]int)
q.hashQueue.Reset()
q.hashCounter = 0
q.pendPool = make(map[string]*fetchRequest)
q.pendCount = 0
q.blockPool = make(map[common.Hash]int)
q.blockOffset = 0
q.blockCache = nil
} }
func (c *queue) resetNoTS() {
c.blockOffset = 0 // Done checks if all the downloads have been retrieved, wiping the queue.
c.hashPool.Clear() func (q *queue) Done() {
c.fetchPool.Clear() q.lock.Lock()
c.blockHashes.Clear() defer q.lock.Unlock()
c.blocks = nil
c.fetching = make(map[string]*chunk) if len(q.blockCache) == 0 {
q.Reset()
}
} }
func (c *queue) size() int { // Size retrieves the number of hashes in the queue, returning separately for
return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size() // pending and already downloaded.
func (q *queue) Size() (int, int) {
q.lock.RLock()
defer q.lock.RUnlock()
return len(q.hashPool), len(q.blockPool)
} }
// reserve a `max` set of hashes for `p` peer. // Pending retrieves the number of hashes pending for retrieval.
func (c *queue) get(p *peer, max int) *chunk { func (q *queue) Pending() int {
c.mu.Lock() q.lock.RLock()
defer c.mu.Unlock() defer q.lock.RUnlock()
// return nothing if the pool has been depleted return q.hashQueue.Size()
if c.hashPool.Size() == 0 { }
return nil
}
limit := int(math.Min(float64(max), float64(c.hashPool.Size()))) // InFlight retrieves the number of fetch requests currently in flight.
// Create a new set of hashes func (q *queue) InFlight() int {
hashes, i := set.New(), 0 q.lock.RLock()
c.hashPool.Each(func(v interface{}) bool { defer q.lock.RUnlock()
// break on limit
if i == limit {
return false
}
// skip any hashes that have previously been requested from the peer
if p.ignored.Has(v) {
return true
}
hashes.Add(v) return len(q.pendPool)
i++ }
// Throttle checks if the download should be throttled (active block fetches
// exceed block cache).
func (q *queue) Throttle() bool {
q.lock.RLock()
defer q.lock.RUnlock()
return q.pendCount >= len(q.blockCache)-len(q.blockPool)
}
// Has checks if a hash is within the download queue or not.
func (q *queue) Has(hash common.Hash) bool {
q.lock.RLock()
defer q.lock.RUnlock()
if _, ok := q.hashPool[hash]; ok {
return true
}
if _, ok := q.blockPool[hash]; ok {
return true return true
})
// if no hashes can be requested return a nil chunk
if hashes.Size() == 0 {
return nil
} }
return false
}
// remove the fetchable hashes from hash pool // Insert adds a set of hashes for the download queue for scheduling.
c.hashPool.Separate(hashes) func (q *queue) Insert(hashes []common.Hash) {
c.fetchPool.Merge(hashes) q.lock.Lock()
defer q.lock.Unlock()
// Create a new chunk for the seperated hashes. The time is being used // Insert all the hashes prioritized in the arrival order
// to reset the chunk (timeout) for i, hash := range hashes {
chunk := &chunk{p, hashes, time.Now()} index := q.hashCounter + i
// register as 'fetching' state
c.fetching[p.id] = chunk
// create new chunk for peer q.hashPool[hash] = index
return chunk q.hashQueue.Push(hash, float32(index)) // Highest gets schedules first
}
// Update the hash counter for the next batch of inserts
q.hashCounter += len(hashes)
} }
func (c *queue) has(hash common.Hash) bool { // GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash) // been downloaded yet (or simply non existent).
func (q *queue) GetHeadBlock() *types.Block {
q.lock.RLock()
defer q.lock.RUnlock()
if len(q.blockCache) == 0 {
return nil
}
return q.blockCache[0]
} }
func (c *queue) getBlock(hash common.Hash) *types.Block { // GetBlock retrieves a downloaded block, or nil if non-existent.
c.mu.Lock() func (q *queue) GetBlock(hash common.Hash) *types.Block {
defer c.mu.Unlock() q.lock.RLock()
defer q.lock.RUnlock()
if !c.blockHashes.Has(hash) { // Short circuit if the block hasn't been downloaded yet
index, ok := q.blockPool[hash]
if !ok {
return nil return nil
} }
// Return the block if it's still available in the cache
for _, block := range c.blocks { if q.blockOffset <= index && index < q.blockOffset+len(q.blockCache) {
if block.Hash() == hash { return q.blockCache[index-q.blockOffset]
return block
}
} }
return nil return nil
} }
// deliver delivers a chunk to the queue that was requested of the peer // TakeBlocks retrieves and permanently removes a batch of blocks from the cache.
func (c *queue) deliver(id string, blocks []*types.Block) (err error) { // The head parameter is required to prevent a race condition where concurrent
c.mu.Lock() // takes may fail parent verifications.
defer c.mu.Unlock() func (q *queue) TakeBlocks(head *types.Block) types.Blocks {
q.lock.Lock()
chunk := c.fetching[id] defer q.lock.Unlock()
// If the chunk was never requested simply ignore it
if chunk != nil {
delete(c.fetching, id)
// check the length of the returned blocks. If the length of blocks is 0
// we'll assume the peer doesn't know about the chain.
if len(blocks) == 0 {
// So we can ignore the blocks we didn't know about
chunk.peer.ignored.Merge(chunk.hashes)
}
// Add the blocks // Short circuit if the head block's different
for i, block := range blocks { if len(q.blockCache) == 0 || q.blockCache[0] != head {
// See (1) for future limitation return nil
n := int(block.NumberU64()) - c.blockOffset }
if n > len(c.blocks) || n < 0 { // Otherwise accumulate all available blocks
// set the error and set the blocks which could be processed var blocks types.Blocks
// abort the rest of the blocks (FIXME this could be improved) for _, block := range q.blockCache {
err = fmt.Errorf("received block which overflow (N=%v O=%v)", block.Number(), c.blockOffset) if block == nil {
blocks = blocks[:i] break
break
}
c.blocks[n] = block
} }
// seperate the blocks and the hashes blocks = append(blocks, block)
blockHashes := chunk.fetchedHashes(blocks) delete(q.blockPool, block.Hash())
// merge block hashes }
c.blockHashes.Merge(blockHashes) // Delete the blocks from the slice and let them be garbage collected
// Add back whatever couldn't be delivered // without this slice trick the blocks would stay in memory until nil
c.hashPool.Merge(chunk.hashes) // would be assigned to q.blocks
// Remove the hashes from the fetch pool copy(q.blockCache, q.blockCache[len(blocks):])
c.fetchPool.Separate(chunk.hashes) for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ {
q.blockCache[k] = nil
} }
q.blockOffset += len(blocks)
return return blocks
} }
func (c *queue) alloc(offset, size int) { // Reserve reserves a set of hashes for the given peer, skipping any previously
c.mu.Lock() // failed download.
defer c.mu.Unlock() func (q *queue) Reserve(p *peer, max int) *fetchRequest {
q.lock.Lock()
defer q.lock.Unlock()
if c.blockOffset < offset { // Short circuit if the pool has been depleted
c.blockOffset = offset if q.hashQueue.Empty() {
return nil
} }
// Retrieve a batch of hashes, skipping previously failed ones
// (1) XXX at some point we could limit allocation to memory and use the disk send := make(map[common.Hash]int)
// to store future blocks. skip := make(map[common.Hash]int)
if len(c.blocks) < size {
c.blocks = append(c.blocks, make([]*types.Block, size)...) for len(send) < max && !q.hashQueue.Empty() {
hash, priority := q.hashQueue.Pop()
if p.ignored.Has(hash) {
skip[hash.(common.Hash)] = int(priority)
} else {
send[hash.(common.Hash)] = int(priority)
}
}
// Merge all the skipped hashes back
for hash, index := range skip {
q.hashQueue.Push(hash, float32(index))
}
// Assemble and return the block download request
if len(send) == 0 {
return nil
} }
request := &fetchRequest{
Peer: p,
Hashes: send,
Time: time.Now(),
}
q.pendPool[p.id] = request
q.pendCount += len(request.Hashes)
return request
} }
// puts puts sets of hashes on to the queue for fetching // Cancel aborts a fetch request, returning all pending hashes to the queue.
func (c *queue) put(hashes *set.Set) { func (q *queue) Cancel(request *fetchRequest) {
c.mu.Lock() q.lock.Lock()
defer c.mu.Unlock() defer q.lock.Unlock()
c.hashPool.Merge(hashes) for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
delete(q.pendPool, request.Peer.id)
q.pendCount -= len(request.Hashes)
} }
type chunk struct { // Expire checks for in flight requests that exceeded a timeout allowance,
peer *peer // canceling them and returning the responsible peers for penalization.
hashes *set.Set func (q *queue) Expire(timeout time.Duration) []string {
itime time.Time q.lock.Lock()
defer q.lock.Unlock()
// Iterate over the expired requests and return each to the queue
peers := []string{}
for id, request := range q.pendPool {
if time.Since(request.Time) > timeout {
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
q.pendCount -= len(request.Hashes)
peers = append(peers, id)
}
}
// Remove the expired requests from the pending pool
for _, id := range peers {
delete(q.pendPool, id)
}
return peers
} }
func (ch *chunk) fetchedHashes(blocks []*types.Block) *set.Set { // Deliver injects a block retrieval response into the download queue.
fhashes := set.New() func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
q.lock.Lock()
defer q.lock.Unlock()
// Short circuit if the blocks were never requested
request := q.pendPool[id]
if request == nil {
return errors.New("no fetches pending")
}
delete(q.pendPool, id)
// Mark all the hashes in the request as non-pending
q.pendCount -= len(request.Hashes)
// If no blocks were retrieved, mark them as unavailable for the origin peer
if len(blocks) == 0 {
for hash, _ := range request.Hashes {
request.Peer.ignored.Add(hash)
}
}
// Iterate over the downloaded blocks and add each of them
errs := make([]error, 0)
for _, block := range blocks { for _, block := range blocks {
fhashes.Add(block.Hash()) // Skip any blocks that fall outside the cache range
index := int(block.NumberU64()) - q.blockOffset
if index >= len(q.blockCache) || index < 0 {
//fmt.Printf("block cache overflown (N=%v O=%v, C=%v)", block.Number(), q.blockOffset, len(q.blockCache))
continue
}
// Skip any blocks that were not requested
hash := block.Hash()
if _, ok := request.Hashes[hash]; !ok {
errs = append(errs, fmt.Errorf("non-requested block %v", hash))
continue
}
// Otherwise merge the block and mark the hash block
q.blockCache[index] = block
delete(request.Hashes, hash)
delete(q.hashPool, hash)
q.blockPool[hash] = int(block.NumberU64())
} }
ch.hashes.Separate(fhashes) // Return all failed fetches to the queue
for hash, index := range request.Hashes {
q.hashQueue.Push(hash, float32(index))
}
if len(errs) != 0 {
return fmt.Errorf("multiple failures: %v", errs)
}
return nil
}
return fhashes // Alloc ensures that the block cache is the correct size, given a starting
// offset, and a memory cap.
func (q *queue) Alloc(offset int) {
q.lock.Lock()
defer q.lock.Unlock()
if q.blockOffset < offset {
q.blockOffset = offset
}
size := len(q.hashPool)
if size > blockCacheLimit {
size = blockCacheLimit
}
if len(q.blockCache) < size {
q.blockCache = append(q.blockCache, make([]*types.Block, size-len(q.blockCache))...)
}
} }

@ -32,31 +32,30 @@ func createBlocksFromHashSet(hashes *set.Set) []*types.Block {
} }
func TestChunking(t *testing.T) { func TestChunking(t *testing.T) {
queue := newqueue() queue := newQueue()
peer1 := newPeer("peer1", common.Hash{}, nil, nil) peer1 := newPeer("peer1", common.Hash{}, nil, nil)
peer2 := newPeer("peer2", common.Hash{}, nil, nil) peer2 := newPeer("peer2", common.Hash{}, nil, nil)
// 99 + 1 (1 == known genesis hash) // 99 + 1 (1 == known genesis hash)
hashes := createHashes(0, 99) hashes := createHashes(0, 99)
hashSet := createHashSet(hashes) queue.Insert(hashes)
queue.put(hashSet)
chunk1 := queue.get(peer1, 99) chunk1 := queue.Reserve(peer1, 99)
if chunk1 == nil { if chunk1 == nil {
t.Errorf("chunk1 is nil") t.Errorf("chunk1 is nil")
t.FailNow() t.FailNow()
} }
chunk2 := queue.get(peer2, 99) chunk2 := queue.Reserve(peer2, 99)
if chunk2 == nil { if chunk2 == nil {
t.Errorf("chunk2 is nil") t.Errorf("chunk2 is nil")
t.FailNow() t.FailNow()
} }
if chunk1.hashes.Size() != 99 { if len(chunk1.Hashes) != 99 {
t.Error("expected chunk1 hashes to be 99, got", chunk1.hashes.Size()) t.Error("expected chunk1 hashes to be 99, got", len(chunk1.Hashes))
} }
if chunk2.hashes.Size() != 1 { if len(chunk2.Hashes) != 1 {
t.Error("expected chunk1 hashes to be 1, got", chunk2.hashes.Size()) t.Error("expected chunk1 hashes to be 1, got", len(chunk2.Hashes))
} }
} }

Loading…
Cancel
Save