From 4800c94392e814a2cb9d343aab4706be0cd0851d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 6 May 2015 15:32:53 +0300 Subject: [PATCH 01/19] eth/downloader: prioritize block fetch based on chain position, cap memory use --- Godeps/Godeps.json | 4 + .../collections/prque/example_test.go | 44 ++ .../cookiejar.v2/collections/prque/prque.go | 75 ++++ .../collections/prque/prque_test.go | 110 +++++ .../cookiejar.v2/collections/prque/sstack.go | 103 +++++ .../collections/prque/sstack_test.go | 93 ++++ eth/downloader/downloader.go | 190 +++----- eth/downloader/downloader_test.go | 4 +- eth/downloader/peer.go | 15 +- eth/downloader/queue.go | 416 ++++++++++++------ eth/downloader/queue_test.go | 17 +- 11 files changed, 798 insertions(+), 273 deletions(-) create mode 100644 Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go create mode 100644 Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go create mode 100644 Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go create mode 100644 Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 2480ff9a24..a5b27e76c5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -98,6 +98,10 @@ "Comment": "v0.1.0-3-g27c4092", "Rev": "27c40922c40b43fe04554d8223a402af3ea333f3" }, + { + "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque", + "Rev": "cf5d8079df7c4501217638e1e3a6e43f94822548" + }, { "ImportPath": "gopkg.in/qml.v1/cdata", "Rev": "1116cb9cd8dee23f8d444ded354eb53122739f99" diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go new file mode 100644 index 0000000000..7b2e5ee844 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/example_test.go @@ -0,0 +1,44 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later +// version. +// +// The toolbox is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// Alternatively, the CookieJar toolbox may be used in accordance with the terms +// and conditions contained in a signed written agreement between you and the +// author(s). + +package prque_test + +import ( + "fmt" + + "gopkg.in/karalabe/cookiejar.v2/collections/prque" +) + +// Insert some data into a priority queue and pop them out in prioritized order. 
+func Example_usage() { + // Define some data to push into the priority queue + prio := []float32{77.7, 22.2, 44.4, 55.5, 11.1, 88.8, 33.3, 99.9, 0.0, 66.6} + data := []string{"zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"} + + // Create the priority queue and insert the prioritized data + pq := prque.New() + for i := 0; i < len(data); i++ { + pq.Push(data[i], prio[i]) + } + // Pop out the data and print them + for !pq.Empty() { + val, prio := pq.Pop() + fmt.Printf("%.1f:%s ", prio, val) + } + // Output: + // 99.9:seven 88.8:five 77.7:zero 66.6:nine 55.5:three 44.4:two 33.3:six 22.2:one 11.1:four 0.0:eight +} diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go new file mode 100644 index 0000000000..8225e8c53b --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go @@ -0,0 +1,75 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later +// version. +// +// The toolbox is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// Alternatively, the CookieJar toolbox may be used in accordance with the terms +// and conditions contained in a signed written agreement between you and the +// author(s). + +// Package prque implements a priority queue data structure supporting arbitrary +// value types and float priorities. +// +// The reasoning behind using floats for the priorities vs. ints or interfaces +// was larger flexibility without sacrificing too much performance or code +// complexity. +// +// If you would like to use a min-priority queue, simply negate the priorities. +// +// Internally the queue is based on the standard heap package working on a +// sortable version of the block based stack. +package prque + +import ( + "container/heap" +) + +// Priority queue data structure. +type Prque struct { + cont *sstack +} + +// Creates a new priority queue. +func New() *Prque { + return &Prque{newSstack()} +} + +// Pushes a value with a given priority into the queue, expanding if necessary. +func (p *Prque) Push(data interface{}, priority float32) { + heap.Push(p.cont, &item{data, priority}) +} + +// Pops the value with the greates priority off the stack and returns it. +// Currently no shrinking is done. +func (p *Prque) Pop() (interface{}, float32) { + item := heap.Pop(p.cont).(*item) + return item.value, item.priority +} + +// Pops only the item from the queue, dropping the associated priority value. +func (p *Prque) PopItem() interface{} { + return heap.Pop(p.cont).(*item).value +} + +// Checks whether the priority queue is empty. +func (p *Prque) Empty() bool { + return p.cont.Len() == 0 +} + +// Returns the number of element in the priority queue. +func (p *Prque) Size() int { + return p.cont.Len() +} + +// Clears the contents of the priority queue. 
+func (p *Prque) Reset() { + p.cont.Reset() +} diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go new file mode 100644 index 0000000000..811c53c737 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go @@ -0,0 +1,110 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later +// version. +// +// The toolbox is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// Alternatively, the CookieJar toolbox may be used in accordance with the terms +// and conditions contained in a signed written agreement between you and the +// author(s). + +package prque + +import ( + "math/rand" + "testing" +) + +func TestPrque(t *testing.T) { + // Generate a batch of random data and a specific priority order + size := 16 * blockSize + prio := rand.Perm(size) + data := make([]int, size) + for i := 0; i < size; i++ { + data[i] = rand.Int() + } + queue := New() + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], float32(prio[i])) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[float32]int) + for i := 0; i < size; i++ { + dict[float32(prio[i])] = data[i] + } + // Pop out the elements in priority order and verify them + prevPrio := float32(size + 1) + for !queue.Empty() { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + } +} + +func TestReset(t *testing.T) { + // Fill the queue with some random data + size := 16 * blockSize + queue := New() + for i := 0; i < size; i++ { + queue.Push(rand.Int(), rand.Float32()) + } + // Reset and ensure it's empty + queue.Reset() + if !queue.Empty() { + t.Errorf("priority queue not empty after reset: %v", queue) + } +} + +func BenchmarkPush(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := make([]float32, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Float32() + } + // Execute the benchmark + b.ResetTimer() + queue := New() + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } +} + +func BenchmarkPop(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := make([]float32, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Float32() + } + queue := New() + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } + // Execute the benchmark + b.ResetTimer() + for !queue.Empty() { + queue.Pop() + } +} diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go 
b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go new file mode 100644 index 0000000000..55375a0910 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go @@ -0,0 +1,103 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later +// version. +// +// The toolbox is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// Alternatively, the CookieJar toolbox may be used in accordance with the terms +// and conditions contained in a signed written agreement between you and the +// author(s). + +package prque + +// The size of a block of data +const blockSize = 4096 + +// A prioritized item in the sorted stack. +type item struct { + value interface{} + priority float32 +} + +// Internal sortable stack data structure. Implements the Push and Pop ops for +// the stack (heap) functionality and the Len, Less and Swap methods for the +// sortability requirements of the heaps. +type sstack struct { + size int + capacity int + offset int + + blocks [][]*item + active []*item +} + +// Creates a new, empty stack. +func newSstack() *sstack { + result := new(sstack) + result.active = make([]*item, blockSize) + result.blocks = [][]*item{result.active} + result.capacity = blockSize + return result +} + +// Pushes a value onto the stack, expanding it if necessary. Required by +// heap.Interface. +func (s *sstack) Push(data interface{}) { + if s.size == s.capacity { + s.active = make([]*item, blockSize) + s.blocks = append(s.blocks, s.active) + s.capacity += blockSize + s.offset = 0 + } else if s.offset == blockSize { + s.active = s.blocks[s.size/blockSize] + s.offset = 0 + } + s.active[s.offset] = data.(*item) + s.offset++ + s.size++ +} + +// Pops a value off the stack and returns it. Currently no shrinking is done. +// Required by heap.Interface. +func (s *sstack) Pop() (res interface{}) { + s.size-- + s.offset-- + if s.offset < 0 { + s.offset = blockSize - 1 + s.active = s.blocks[s.size/blockSize] + } + res, s.active[s.offset] = s.active[s.offset], nil + return +} + +// Returns the length of the stack. Required by sort.Interface. +func (s *sstack) Len() int { + return s.size +} + +// Compares the priority of two elements of the stack (higher is first). +// Required by sort.Interface. +func (s *sstack) Less(i, j int) bool { + return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority +} + +// Swapts two elements in the stack. Required by sort.Interface. +func (s *sstack) Swap(i, j int) { + ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize + s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io] +} + +// Resets the stack, effectively clearing its contents. 
+func (s *sstack) Reset() { + s.size = 0 + s.offset = 0 + s.active = s.blocks[0] + s.capacity = blockSize +} diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go new file mode 100644 index 0000000000..7bdc08bf5d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go @@ -0,0 +1,93 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: you can redistribute it and/or modify it under +// the terms of the GNU General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later +// version. +// +// The toolbox is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// Alternatively, the CookieJar toolbox may be used in accordance with the terms +// and conditions contained in a signed written agreement between you and the +// author(s). + +package prque + +import ( + "math/rand" + "sort" + "testing" +) + +func TestSstack(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item, size) + for i := 0; i < size; i++ { + data[i] = &item{rand.Int(), rand.Float32()} + } + stack := newSstack() + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, stack.Pop().(*item)) + } + } + rest := []*item{} + for stack.Len() > 0 { + rest = append(rest, stack.Pop().(*item)) + } + // Make sure the contents of the resulting slices are ok + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + if i%2 == 1 && data[i] != rest[len(rest)-i/2-1] { + t.Errorf("push/pop mismatch: have %v, want %v.", rest[len(rest)-i/2-1], data[i]) + } + } + } +} + +func TestSstackSort(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item, size) + for i := 0; i < size; i++ { + data[i] = &item{rand.Int(), float32(i)} + } + // Push all the data into the stack + stack := newSstack() + for _, val := range data { + stack.Push(val) + } + // Sort and pop the stack contents (should reverse the order) + sort.Sort(stack) + for _, val := range data { + out := stack.Pop() + if out != val { + t.Errorf("push/pop mismatch after sort: have %v, want %v.", out, val) + } + } +} + +func TestSstackReset(t *testing.T) { + // Push some stuff onto the stack + size := 16 * blockSize + stack := newSstack() + for i := 0; i < size; i++ { + stack.Push(&item{i, float32(i)}) + } + // Clear and verify + stack.Reset() + if stack.Len() != 0 { + t.Errorf("stack not empty after reset: %v", stack) + } +} diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 15f4cb0a3b..608acf499a 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -11,11 +11,10 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" - "gopkg.in/fatih/set.v0" ) const ( - maxBlockFetch = 256 // Amount of max blocks to be fetched per chunk + 
maxBlockFetch = 128 // Amount of max blocks to be fetched per chunk peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount hashTtl = 20 * time.Second // The amount of time it takes for a hash request to time out ) @@ -80,7 +79,7 @@ type Downloader struct { func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader { downloader := &Downloader{ - queue: newqueue(), + queue: newQueue(), peers: make(peers), hasBlock: hasBlock, getBlock: getBlock, @@ -93,7 +92,7 @@ func New(hasBlock hashCheckFn, getBlock getBlockFn) *Downloader { } func (d *Downloader) Stats() (current int, max int) { - return d.queue.blockHashes.Size(), d.queue.fetchPool.Size() + d.queue.hashPool.Size() + return d.queue.Size() } func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error { @@ -111,7 +110,7 @@ func (d *Downloader) RegisterPeer(id string, hash common.Hash, getHashes hashFet return nil } -// UnregisterPeer unregister's a peer. This will prevent any action from the specified peer. +// UnregisterPeer unregisters a peer. This will prevent any action from the specified peer. func (d *Downloader) UnregisterPeer(id string) { d.mu.Lock() defer d.mu.Unlock() @@ -121,20 +120,20 @@ func (d *Downloader) UnregisterPeer(id string) { delete(d.peers, id) } -// SynchroniseWithPeer will select the peer and use it for synchronising. If an empty string is given -// it will use the best peer possible and synchronise if it's TD is higher than our own. If any of the +// SynchroniseWithPeer will select the peer and use it for synchronizing. If an empty string is given +// it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous func (d *Downloader) Synchronise(id string, hash common.Hash) error { // Make sure it's doing neither. Once done we can restart the // downloading process if the TD is higher. For now just get on - // with whatever is going on. This prevents unecessary switching. + // with whatever is going on. This prevents unnecessary switching. if d.isBusy() { return errBusy } - // When a synchronisation attempt is made while the queue stil + // When a synchronization attempt is made while the queue still // contains items we abort the sync attempt - if d.queue.size() > 0 { + if done, pend := d.queue.Size(); done+pend > 0 { return errPendingQueue } @@ -157,56 +156,23 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { // are processed. If the block count reaches zero and done is called // we reset the queue for the next batch of incoming hashes and blocks. 
func (d *Downloader) Done() { - d.queue.mu.Lock() - defer d.queue.mu.Unlock() - - if len(d.queue.blocks) == 0 { - d.queue.resetNoTS() - } + d.queue.Done() } // TakeBlocks takes blocks from the queue and yields them to the blockTaker handler // it's possible it yields no blocks func (d *Downloader) TakeBlocks() types.Blocks { - d.queue.mu.Lock() - defer d.queue.mu.Unlock() - - var blocks types.Blocks - if len(d.queue.blocks) > 0 { - // Make sure the parent hash is known - if d.queue.blocks[0] != nil && !d.hasBlock(d.queue.blocks[0].ParentHash()) { - return nil - } - - for _, block := range d.queue.blocks { - if block == nil { - break - } - - blocks = append(blocks, block) - } - d.queue.blockOffset += len(blocks) - // delete the blocks from the slice and let them be garbage collected - // without this slice trick the blocks would stay in memory until nil - // would be assigned to d.queue.blocks - copy(d.queue.blocks, d.queue.blocks[len(blocks):]) - for k, n := len(d.queue.blocks)-len(blocks), len(d.queue.blocks); k < n; k++ { - d.queue.blocks[k] = nil - } - d.queue.blocks = d.queue.blocks[:len(d.queue.blocks)-len(blocks)] - - //d.queue.blocks = d.queue.blocks[len(blocks):] - if len(d.queue.blocks) == 0 { - d.queue.blocks = nil - } - + // Check that there are blocks available and its parents are known + head := d.queue.GetHeadBlock() + if head == nil || !d.hasBlock(head.ParentHash()) { + return nil } - - return blocks + // Retrieve a full batch of blocks + return d.queue.TakeBlocks(head) } func (d *Downloader) Has(hash common.Hash) bool { - return d.queue.has(hash) + return d.queue.Has(hash) } func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) { @@ -214,7 +180,7 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) defer func() { // reset on error if err != nil { - d.queue.reset() + d.queue.Reset() } }() @@ -244,7 +210,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b atomic.StoreInt32(&d.fetchingHashes, 1) defer atomic.StoreInt32(&d.fetchingHashes, 0) - if d.queue.has(h) { + if d.queue.Has(h) { // TODO: Is this possible? Shouldn't queue be empty for startFetchingHashes to be even called? return errAlreadyInPool } @@ -256,7 +222,7 @@ func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial b // In such circumstances we don't need to download the block so don't add it to the queue. if !ignoreInitial { // Add the hash to the queue first - d.queue.hashPool.Add(h) + d.queue.Insert([]common.Hash{h}) } // Get the first batch of hashes p.getHashes(h) @@ -273,7 +239,7 @@ out: for { select { case hashPack := <-d.hashCh: - // make sure the active peer is giving us the hashes + // Make sure the active peer is giving us the hashes if hashPack.peerId != activePeer.id { glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)\n", hashPack.peerId) break @@ -281,43 +247,37 @@ out: failureResponseTimer.Reset(hashTtl) - var ( - hashes = hashPack.hashes - done bool // determines whether we're done fetching hashes (i.e. 
common hash found) - ) - hashSet := set.New() - for _, hash = range hashes { - if d.hasBlock(hash) || d.queue.blockHashes.Has(hash) { - glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4]) + // Make sure the peer actually gave something valid + if len(hashPack.hashes) == 0 { + glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id) + d.queue.Reset() + return errEmptyHashSet + } + // Determine if we're done fetching hashes (queue up all pending), and continue if not done + done, index := false, 0 + for index, hash = range hashPack.hashes { + if d.hasBlock(hash) || d.queue.GetBlock(hash) != nil { + glog.V(logger.Debug).Infof("Found common hash %x\n", hash[:4]) + hashPack.hashes = hashPack.hashes[:index] done = true break } - - hashSet.Add(hash) } - d.queue.put(hashSet) - - // Add hashes to the chunk set - if len(hashes) == 0 { // Make sure the peer actually gave you something valid - glog.V(logger.Debug).Infof("Peer (%s) responded with empty hash set\n", activePeer.id) - d.queue.reset() + d.queue.Insert(hashPack.hashes) - return errEmptyHashSet - } else if !done { // Check if we're done fetching - // Get the next set of hashes + if !done { activePeer.getHashes(hash) - } else { // we're done - // The offset of the queue is determined by the highest known block - var offset int - if block := d.getBlock(hash); block != nil { - offset = int(block.NumberU64() + 1) - } - // allocate proper size for the queueue - d.queue.alloc(offset, d.queue.hashPool.Size()) - - break out + continue } + // We're done, allocate the download cache and proceed pulling the blocks + offset := 0 + if block := d.getBlock(hash); block != nil { + offset = int(block.NumberU64() + 1) + } + d.queue.Alloc(offset) + break out + case <-failureResponseTimer.C: glog.V(logger.Debug).Infof("Peer (%s) didn't respond in time for hash request\n", p.id) @@ -326,7 +286,7 @@ out: // already fetched hash list. This can't guarantee 100% correctness but does // a fair job. This is always either correct or false incorrect. for id, peer := range d.peers { - if d.queue.hashPool.Has(peer.recentHash) && !attemptedPeers[id] { + if d.queue.Has(peer.recentHash) && !attemptedPeers[id] { p = peer break } @@ -335,7 +295,7 @@ out: // if all peers have been tried, abort the process entirely or if the hash is // the zero hash. if p == nil || (hash == common.Hash{}) { - d.queue.reset() + d.queue.Reset() return errTimeout } @@ -346,13 +306,14 @@ out: glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id) } } - glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.hashPool.Size(), time.Since(start)) + glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start)) return nil } func (d *Downloader) startFetchingBlocks(p *peer) error { - glog.V(logger.Detail).Infoln("Downloading", d.queue.hashPool.Size(), "block(s)") + glog.V(logger.Detail).Infoln("Downloading", d.queue.Pending(), "block(s)") + atomic.StoreInt32(&d.downloadingBlocks, 1) defer atomic.StoreInt32(&d.downloadingBlocks, 0) // Defer the peer reset. 
This will empty the peer requested set @@ -362,7 +323,7 @@ func (d *Downloader) startFetchingBlocks(p *peer) error { start := time.Now() - // default ticker for re-fetching blocks everynow and then + // default ticker for re-fetching blocks every now and then ticker := time.NewTicker(20 * time.Millisecond) out: for { @@ -371,7 +332,7 @@ out: // If the peer was previously banned and failed to deliver it's pack // in a reasonable time frame, ignore it's message. if d.peers[blockPack.peerId] != nil { - err := d.queue.deliver(blockPack.peerId, blockPack.blocks) + err := d.queue.Deliver(blockPack.peerId, blockPack.blocks) if err != nil { glog.V(logger.Debug).Infof("deliver failed for peer %s: %v\n", blockPack.peerId, err) // FIXME d.UnregisterPeer(blockPack.peerId) @@ -385,46 +346,49 @@ out: d.peers.setState(blockPack.peerId, idleState) } case <-ticker.C: - // after removing bad peers make sure we actually have suffucient peer left to keep downlading + // after removing bad peers make sure we actually have sufficient peer left to keep downloading if len(d.peers) == 0 { - d.queue.reset() + d.queue.Reset() return errNoPeers } // If there are unrequested hashes left start fetching // from the available peers. - if d.queue.hashPool.Size() > 0 { + if d.queue.Pending() > 0 { + // Throttle the download if block cache is full and waiting processing + if d.queue.Throttle() { + continue + } + availablePeers := d.peers.get(idleState) for _, peer := range availablePeers { // Get a possible chunk. If nil is returned no chunk // could be returned due to no hashes available. - chunk := d.queue.get(peer, maxBlockFetch) - if chunk == nil { + request := d.queue.Reserve(peer, maxBlockFetch) + if request == nil { continue } - // XXX make fetch blocking. // Fetch the chunk and check for error. If the peer was somehow // already fetching a chunk due to a bug, it will be returned to // the queue - if err := peer.fetch(chunk); err != nil { + if err := peer.fetch(request); err != nil { // log for tracing glog.V(logger.Debug).Infof("peer %s received double work (state = %v)\n", peer.id, peer.state) - d.queue.put(chunk.hashes) + d.queue.Cancel(request) } } - // make sure that we have peers available for fetching. If all peers have been tried // and all failed throw an error - if len(d.queue.fetching) == 0 { - d.queue.reset() + if d.queue.InFlight() == 0 { + d.queue.Reset() - return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.hashPool.Size()) + return fmt.Errorf("%v peers avaialable = %d. total peers = %d. hashes needed = %d", errPeersUnavailable, len(availablePeers), len(d.peers), d.queue.Pending()) } - } else if len(d.queue.fetching) == 0 { - // When there are no more queue and no more `fetching`. We can + } else if d.queue.InFlight() == 0 { + // When there are no more queue and no more in flight, We can // safely assume we're done. Another part of the process will check // for parent errors and will re-request anything that's missing break out @@ -434,27 +398,13 @@ out: // that badly or poorly behave are removed from the peer set (not banned). // Bad peers are excluded from the available peer set and therefor won't be // reused. XXX We could re-introduce peers after X time. 
- d.queue.mu.Lock() - var badPeers []string - for pid, chunk := range d.queue.fetching { - if time.Since(chunk.itime) > blockTtl { - badPeers = append(badPeers, pid) - // remove peer as good peer from peer list - // FIXME d.UnregisterPeer(pid) - } - } - d.queue.mu.Unlock() - + badPeers := d.queue.Expire(blockTtl) for _, pid := range badPeers { - // A nil chunk is delivered so that the chunk's hashes are given - // back to the queue objects. When hashes are put back in the queue - // other (decent) peers can pick them up. // XXX We could make use of a reputation system here ranking peers // in their performance // 1) Time for them to respond; // 2) Measure their speed; // 3) Amount and availability. - d.queue.deliver(pid, nil) if peer := d.peers[pid]; peer != nil { peer.demote() peer.reset() @@ -486,7 +436,7 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error { if glog.V(logger.Detail) && len(hashes) != 0 { from, to := hashes[0], hashes[len(hashes)-1] - glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.hashPool.Size(), from[:4], to[:4], id) + glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id) } d.hashCh <- hashPack{id, hashes} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 872ea02eba..11834d7885 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -128,7 +128,7 @@ func TestDownload(t *testing.T) { t.Error("download error", err) } - inqueue := len(tester.downloader.queue.blocks) + inqueue := len(tester.downloader.queue.blockCache) if inqueue != targetBlocks { t.Error("expected", targetBlocks, "have", inqueue) } @@ -151,7 +151,7 @@ func TestMissing(t *testing.T) { t.Error("download error", err) } - inqueue := len(tester.downloader.queue.blocks) + inqueue := len(tester.downloader.queue.blockCache) if inqueue != targetBlocks { t.Error("expected", targetBlocks, "have", inqueue) } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index 91977f5927..45ec1cbfd7 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -78,7 +78,7 @@ func newPeer(id string, hash common.Hash, getHashes hashFetcherFn, getBlocks blo } // fetch a chunk using the peer -func (p *peer) fetch(chunk *chunk) error { +func (p *peer) fetch(request *fetchRequest) error { p.mu.Lock() defer p.mu.Unlock() @@ -88,13 +88,12 @@ func (p *peer) fetch(chunk *chunk) error { // set working state p.state = workingState - // convert the set to a fetchable slice - hashes, i := make([]common.Hash, chunk.hashes.Size()), 0 - chunk.hashes.Each(func(v interface{}) bool { - hashes[i] = v.(common.Hash) - i++ - return true - }) + + // Convert the hash set to a fetchable slice + hashes := make([]common.Hash, 0, len(request.Hashes)) + for hash, _ := range request.Hashes { + hashes = append(hashes, hash) + } p.getBlocks(hashes) return nil diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 1b63a5ffb2..eae5670521 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -1,201 +1,349 @@ package downloader import ( + "errors" "fmt" - "math" "sync" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "gopkg.in/fatih/set.v0" + "gopkg.in/karalabe/cookiejar.v2/collections/prque" ) +const ( + blockCacheLimit = 4096 // Maximum number of blocks to cache before throttling the download +) + +// fetchRequest is a currently running block retrieval operation. 
+type fetchRequest struct { + Peer *peer // Peer to which the request was sent + Hashes map[common.Hash]int // Requested hashes with their insertion index (priority) + Time time.Time // Time when the request was made +} + // queue represents hashes that are either need fetching or are being fetched type queue struct { - hashPool *set.Set - fetchPool *set.Set - blockHashes *set.Set + hashPool map[common.Hash]int // Pending hashes, mapping to their insertion index (priority) + hashQueue *prque.Prque // Priority queue of the block hashes to fetch + hashCounter int // Counter indexing the added hashes to ensure retrieval order + + pendPool map[string]*fetchRequest // Currently pending block retrieval operations + pendCount int // Number of pending block fetches (to throttle the download) - mu sync.Mutex - fetching map[string]*chunk + blockPool map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes + blockCache []*types.Block // Downloaded but not yet delivered blocks + blockOffset int // Offset of the first cached block in the block-chain - blockOffset int - blocks []*types.Block + lock sync.RWMutex } -func newqueue() *queue { +// newQueue creates a new download queue for scheduling block retrieval. +func newQueue() *queue { return &queue{ - hashPool: set.New(), - fetchPool: set.New(), - blockHashes: set.New(), - fetching: make(map[string]*chunk), + hashPool: make(map[common.Hash]int), + hashQueue: prque.New(), + pendPool: make(map[string]*fetchRequest), + blockPool: make(map[common.Hash]int), } } -func (c *queue) reset() { - c.mu.Lock() - defer c.mu.Unlock() +// Reset clears out the queue contents. +func (q *queue) Reset() { + q.lock.Lock() + defer q.lock.Unlock() - c.resetNoTS() + q.hashPool = make(map[common.Hash]int) + q.hashQueue.Reset() + q.hashCounter = 0 + + q.pendPool = make(map[string]*fetchRequest) + q.pendCount = 0 + + q.blockPool = make(map[common.Hash]int) + q.blockOffset = 0 + q.blockCache = nil } -func (c *queue) resetNoTS() { - c.blockOffset = 0 - c.hashPool.Clear() - c.fetchPool.Clear() - c.blockHashes.Clear() - c.blocks = nil - c.fetching = make(map[string]*chunk) + +// Done checks if all the downloads have been retrieved, wiping the queue. +func (q *queue) Done() { + q.lock.Lock() + defer q.lock.Unlock() + + if len(q.blockCache) == 0 { + q.Reset() + } } -func (c *queue) size() int { - return c.hashPool.Size() + c.blockHashes.Size() + c.fetchPool.Size() +// Size retrieves the number of hashes in the queue, returning separately for +// pending and already downloaded. +func (q *queue) Size() (int, int) { + q.lock.RLock() + defer q.lock.RUnlock() + + return len(q.hashPool), len(q.blockPool) } -// reserve a `max` set of hashes for `p` peer. -func (c *queue) get(p *peer, max int) *chunk { - c.mu.Lock() - defer c.mu.Unlock() +// Pending retrieves the number of hashes pending for retrieval. +func (q *queue) Pending() int { + q.lock.RLock() + defer q.lock.RUnlock() - // return nothing if the pool has been depleted - if c.hashPool.Size() == 0 { - return nil - } + return q.hashQueue.Size() +} - limit := int(math.Min(float64(max), float64(c.hashPool.Size()))) - // Create a new set of hashes - hashes, i := set.New(), 0 - c.hashPool.Each(func(v interface{}) bool { - // break on limit - if i == limit { - return false - } - // skip any hashes that have previously been requested from the peer - if p.ignored.Has(v) { - return true - } +// InFlight retrieves the number of fetch requests currently in flight. 
+func (q *queue) InFlight() int { + q.lock.RLock() + defer q.lock.RUnlock() - hashes.Add(v) - i++ + return len(q.pendPool) +} +// Throttle checks if the download should be throttled (active block fetches +// exceed block cache). +func (q *queue) Throttle() bool { + q.lock.RLock() + defer q.lock.RUnlock() + + return q.pendCount >= len(q.blockCache)-len(q.blockPool) +} + +// Has checks if a hash is within the download queue or not. +func (q *queue) Has(hash common.Hash) bool { + q.lock.RLock() + defer q.lock.RUnlock() + + if _, ok := q.hashPool[hash]; ok { + return true + } + if _, ok := q.blockPool[hash]; ok { return true - }) - // if no hashes can be requested return a nil chunk - if hashes.Size() == 0 { - return nil } + return false +} - // remove the fetchable hashes from hash pool - c.hashPool.Separate(hashes) - c.fetchPool.Merge(hashes) +// Insert adds a set of hashes for the download queue for scheduling. +func (q *queue) Insert(hashes []common.Hash) { + q.lock.Lock() + defer q.lock.Unlock() - // Create a new chunk for the seperated hashes. The time is being used - // to reset the chunk (timeout) - chunk := &chunk{p, hashes, time.Now()} - // register as 'fetching' state - c.fetching[p.id] = chunk + // Insert all the hashes prioritized in the arrival order + for i, hash := range hashes { + index := q.hashCounter + i - // create new chunk for peer - return chunk + q.hashPool[hash] = index + q.hashQueue.Push(hash, float32(index)) // Highest gets schedules first + } + // Update the hash counter for the next batch of inserts + q.hashCounter += len(hashes) } -func (c *queue) has(hash common.Hash) bool { - return c.hashPool.Has(hash) || c.fetchPool.Has(hash) || c.blockHashes.Has(hash) +// GetHeadBlock retrieves the first block from the cache, or nil if it hasn't +// been downloaded yet (or simply non existent). +func (q *queue) GetHeadBlock() *types.Block { + q.lock.RLock() + defer q.lock.RUnlock() + + if len(q.blockCache) == 0 { + return nil + } + return q.blockCache[0] } -func (c *queue) getBlock(hash common.Hash) *types.Block { - c.mu.Lock() - defer c.mu.Unlock() +// GetBlock retrieves a downloaded block, or nil if non-existent. +func (q *queue) GetBlock(hash common.Hash) *types.Block { + q.lock.RLock() + defer q.lock.RUnlock() - if !c.blockHashes.Has(hash) { + // Short circuit if the block hasn't been downloaded yet + index, ok := q.blockPool[hash] + if !ok { return nil } - - for _, block := range c.blocks { - if block.Hash() == hash { - return block - } + // Return the block if it's still available in the cache + if q.blockOffset <= index && index < q.blockOffset+len(q.blockCache) { + return q.blockCache[index-q.blockOffset] } return nil } -// deliver delivers a chunk to the queue that was requested of the peer -func (c *queue) deliver(id string, blocks []*types.Block) (err error) { - c.mu.Lock() - defer c.mu.Unlock() - - chunk := c.fetching[id] - // If the chunk was never requested simply ignore it - if chunk != nil { - delete(c.fetching, id) - // check the length of the returned blocks. If the length of blocks is 0 - // we'll assume the peer doesn't know about the chain. - if len(blocks) == 0 { - // So we can ignore the blocks we didn't know about - chunk.peer.ignored.Merge(chunk.hashes) - } +// TakeBlocks retrieves and permanently removes a batch of blocks from the cache. +// The head parameter is required to prevent a race condition where concurrent +// takes may fail parent verifications. 
+func (q *queue) TakeBlocks(head *types.Block) types.Blocks { + q.lock.Lock() + defer q.lock.Unlock() - // Add the blocks - for i, block := range blocks { - // See (1) for future limitation - n := int(block.NumberU64()) - c.blockOffset - if n > len(c.blocks) || n < 0 { - // set the error and set the blocks which could be processed - // abort the rest of the blocks (FIXME this could be improved) - err = fmt.Errorf("received block which overflow (N=%v O=%v)", block.Number(), c.blockOffset) - blocks = blocks[:i] - break - } - c.blocks[n] = block + // Short circuit if the head block's different + if len(q.blockCache) == 0 || q.blockCache[0] != head { + return nil + } + // Otherwise accumulate all available blocks + var blocks types.Blocks + for _, block := range q.blockCache { + if block == nil { + break } - // seperate the blocks and the hashes - blockHashes := chunk.fetchedHashes(blocks) - // merge block hashes - c.blockHashes.Merge(blockHashes) - // Add back whatever couldn't be delivered - c.hashPool.Merge(chunk.hashes) - // Remove the hashes from the fetch pool - c.fetchPool.Separate(chunk.hashes) + blocks = append(blocks, block) + delete(q.blockPool, block.Hash()) + } + // Delete the blocks from the slice and let them be garbage collected + // without this slice trick the blocks would stay in memory until nil + // would be assigned to q.blocks + copy(q.blockCache, q.blockCache[len(blocks):]) + for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ { + q.blockCache[k] = nil } + q.blockOffset += len(blocks) - return + return blocks } -func (c *queue) alloc(offset, size int) { - c.mu.Lock() - defer c.mu.Unlock() +// Reserve reserves a set of hashes for the given peer, skipping any previously +// failed download. +func (q *queue) Reserve(p *peer, max int) *fetchRequest { + q.lock.Lock() + defer q.lock.Unlock() - if c.blockOffset < offset { - c.blockOffset = offset + // Short circuit if the pool has been depleted + if q.hashQueue.Empty() { + return nil } - - // (1) XXX at some point we could limit allocation to memory and use the disk - // to store future blocks. - if len(c.blocks) < size { - c.blocks = append(c.blocks, make([]*types.Block, size)...) + // Retrieve a batch of hashes, skipping previously failed ones + send := make(map[common.Hash]int) + skip := make(map[common.Hash]int) + + for len(send) < max && !q.hashQueue.Empty() { + hash, priority := q.hashQueue.Pop() + if p.ignored.Has(hash) { + skip[hash.(common.Hash)] = int(priority) + } else { + send[hash.(common.Hash)] = int(priority) + } + } + // Merge all the skipped hashes back + for hash, index := range skip { + q.hashQueue.Push(hash, float32(index)) + } + // Assemble and return the block download request + if len(send) == 0 { + return nil } + request := &fetchRequest{ + Peer: p, + Hashes: send, + Time: time.Now(), + } + q.pendPool[p.id] = request + q.pendCount += len(request.Hashes) + + return request } -// puts puts sets of hashes on to the queue for fetching -func (c *queue) put(hashes *set.Set) { - c.mu.Lock() - defer c.mu.Unlock() +// Cancel aborts a fetch request, returning all pending hashes to the queue. 
+func (q *queue) Cancel(request *fetchRequest) { + q.lock.Lock() + defer q.lock.Unlock() - c.hashPool.Merge(hashes) + for hash, index := range request.Hashes { + q.hashQueue.Push(hash, float32(index)) + } + delete(q.pendPool, request.Peer.id) + q.pendCount -= len(request.Hashes) } -type chunk struct { - peer *peer - hashes *set.Set - itime time.Time +// Expire checks for in flight requests that exceeded a timeout allowance, +// canceling them and returning the responsible peers for penalization. +func (q *queue) Expire(timeout time.Duration) []string { + q.lock.Lock() + defer q.lock.Unlock() + + // Iterate over the expired requests and return each to the queue + peers := []string{} + for id, request := range q.pendPool { + if time.Since(request.Time) > timeout { + for hash, index := range request.Hashes { + q.hashQueue.Push(hash, float32(index)) + } + q.pendCount -= len(request.Hashes) + peers = append(peers, id) + } + } + // Remove the expired requests from the pending pool + for _, id := range peers { + delete(q.pendPool, id) + } + return peers } -func (ch *chunk) fetchedHashes(blocks []*types.Block) *set.Set { - fhashes := set.New() +// Deliver injects a block retrieval response into the download queue. +func (q *queue) Deliver(id string, blocks []*types.Block) (err error) { + q.lock.Lock() + defer q.lock.Unlock() + + // Short circuit if the blocks were never requested + request := q.pendPool[id] + if request == nil { + return errors.New("no fetches pending") + } + delete(q.pendPool, id) + + // Mark all the hashes in the request as non-pending + q.pendCount -= len(request.Hashes) + + // If no blocks were retrieved, mark them as unavailable for the origin peer + if len(blocks) == 0 { + for hash, _ := range request.Hashes { + request.Peer.ignored.Add(hash) + } + } + // Iterate over the downloaded blocks and add each of them + errs := make([]error, 0) for _, block := range blocks { - fhashes.Add(block.Hash()) + // Skip any blocks that fall outside the cache range + index := int(block.NumberU64()) - q.blockOffset + if index >= len(q.blockCache) || index < 0 { + //fmt.Printf("block cache overflown (N=%v O=%v, C=%v)", block.Number(), q.blockOffset, len(q.blockCache)) + continue + } + // Skip any blocks that were not requested + hash := block.Hash() + if _, ok := request.Hashes[hash]; !ok { + errs = append(errs, fmt.Errorf("non-requested block %v", hash)) + continue + } + // Otherwise merge the block and mark the hash block + q.blockCache[index] = block + + delete(request.Hashes, hash) + delete(q.hashPool, hash) + q.blockPool[hash] = int(block.NumberU64()) } - ch.hashes.Separate(fhashes) + // Return all failed fetches to the queue + for hash, index := range request.Hashes { + q.hashQueue.Push(hash, float32(index)) + } + if len(errs) != 0 { + return fmt.Errorf("multiple failures: %v", errs) + } + return nil +} - return fhashes +// Alloc ensures that the block cache is the correct size, given a starting +// offset, and a memory cap. +func (q *queue) Alloc(offset int) { + q.lock.Lock() + defer q.lock.Unlock() + + if q.blockOffset < offset { + q.blockOffset = offset + } + size := len(q.hashPool) + if size > blockCacheLimit { + size = blockCacheLimit + } + if len(q.blockCache) < size { + q.blockCache = append(q.blockCache, make([]*types.Block, size-len(q.blockCache))...) 
+ } } diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index b163bd9c77..b1f3591f36 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -32,31 +32,30 @@ func createBlocksFromHashSet(hashes *set.Set) []*types.Block { } func TestChunking(t *testing.T) { - queue := newqueue() + queue := newQueue() peer1 := newPeer("peer1", common.Hash{}, nil, nil) peer2 := newPeer("peer2", common.Hash{}, nil, nil) // 99 + 1 (1 == known genesis hash) hashes := createHashes(0, 99) - hashSet := createHashSet(hashes) - queue.put(hashSet) + queue.Insert(hashes) - chunk1 := queue.get(peer1, 99) + chunk1 := queue.Reserve(peer1, 99) if chunk1 == nil { t.Errorf("chunk1 is nil") t.FailNow() } - chunk2 := queue.get(peer2, 99) + chunk2 := queue.Reserve(peer2, 99) if chunk2 == nil { t.Errorf("chunk2 is nil") t.FailNow() } - if chunk1.hashes.Size() != 99 { - t.Error("expected chunk1 hashes to be 99, got", chunk1.hashes.Size()) + if len(chunk1.Hashes) != 99 { + t.Error("expected chunk1 hashes to be 99, got", len(chunk1.Hashes)) } - if chunk2.hashes.Size() != 1 { - t.Error("expected chunk1 hashes to be 1, got", chunk2.hashes.Size()) + if len(chunk2.Hashes) != 1 { + t.Error("expected chunk1 hashes to be 1, got", len(chunk2.Hashes)) } } From 45f8304f3c44e5379c7e30ab144d73e591e270af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 7 May 2015 12:59:19 +0300 Subject: [PATCH 02/19] eth/downloader: fix expiration not running while fetching --- eth/downloader/downloader.go | 40 ++++++++++++++++-------------------- eth/downloader/queue.go | 26 ++++++++++++----------- 2 files changed, 32 insertions(+), 34 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 608acf499a..25b2511127 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -346,13 +346,28 @@ out: d.peers.setState(blockPack.peerId, idleState) } case <-ticker.C: - // after removing bad peers make sure we actually have sufficient peer left to keep downloading + // Check for bad peers. Bad peers may indicate a peer not responding + // to a `getBlocks` message. A timeout of 5 seconds is set. Peers + // that badly or poorly behave are removed from the peer set (not banned). + // Bad peers are excluded from the available peer set and therefor won't be + // reused. XXX We could re-introduce peers after X time. + badPeers := d.queue.Expire(blockTtl) + for _, pid := range badPeers { + // XXX We could make use of a reputation system here ranking peers + // in their performance + // 1) Time for them to respond; + // 2) Measure their speed; + // 3) Amount and availability. + if peer := d.peers[pid]; peer != nil { + peer.demote() + peer.reset() + } + } + // After removing bad peers make sure we actually have sufficient peer left to keep downloading if len(d.peers) == 0 { d.queue.Reset() - return errNoPeers } - // If there are unrequested hashes left start fetching // from the available peers. if d.queue.Pending() > 0 { @@ -392,25 +407,6 @@ out: // safely assume we're done. Another part of the process will check // for parent errors and will re-request anything that's missing break out - } else { - // Check for bad peers. Bad peers may indicate a peer not responding - // to a `getBlocks` message. A timeout of 5 seconds is set. Peers - // that badly or poorly behave are removed from the peer set (not banned). - // Bad peers are excluded from the available peer set and therefor won't be - // reused. XXX We could re-introduce peers after X time. 
- badPeers := d.queue.Expire(blockTtl) - for _, pid := range badPeers { - // XXX We could make use of a reputation system here ranking peers - // in their performance - // 1) Time for them to respond; - // 2) Measure their speed; - // 3) Amount and availability. - if peer := d.peers[pid]; peer != nil { - peer.demote() - peer.reset() - } - } - } } } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index eae5670521..d849d4d68a 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -12,7 +12,7 @@ import ( ) const ( - blockCacheLimit = 4096 // Maximum number of blocks to cache before throttling the download + blockCacheLimit = 1024 // Maximum number of blocks to cache before throttling the download ) // fetchRequest is a currently running block retrieval operation. @@ -28,8 +28,7 @@ type queue struct { hashQueue *prque.Prque // Priority queue of the block hashes to fetch hashCounter int // Counter indexing the added hashes to ensure retrieval order - pendPool map[string]*fetchRequest // Currently pending block retrieval operations - pendCount int // Number of pending block fetches (to throttle the download) + pendPool map[string]*fetchRequest // Currently pending block retrieval operations blockPool map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes blockCache []*types.Block // Downloaded but not yet delivered blocks @@ -58,7 +57,6 @@ func (q *queue) Reset() { q.hashCounter = 0 q.pendPool = make(map[string]*fetchRequest) - q.pendCount = 0 q.blockPool = make(map[common.Hash]int) q.blockOffset = 0 @@ -106,7 +104,13 @@ func (q *queue) Throttle() bool { q.lock.RLock() defer q.lock.RUnlock() - return q.pendCount >= len(q.blockCache)-len(q.blockPool) + // Calculate the currently in-flight block requests + pending := 0 + for _, request := range q.pendPool { + pending += len(request.Hashes) + } + // Throttle if more blocks are in-flight than free space in the cache + return pending >= len(q.blockCache)-len(q.blockPool) } // Has checks if a hash is within the download queue or not. 
@@ -206,10 +210,14 @@ func (q *queue) Reserve(p *peer, max int) *fetchRequest { q.lock.Lock() defer q.lock.Unlock() - // Short circuit if the pool has been depleted + // Short circuit if the pool has been depleted, or if the peer's already + // downloading something (sanity check not to corrupt state) if q.hashQueue.Empty() { return nil } + if _, ok := q.pendPool[p.id]; ok { + return nil + } // Retrieve a batch of hashes, skipping previously failed ones send := make(map[common.Hash]int) skip := make(map[common.Hash]int) @@ -236,7 +244,6 @@ func (q *queue) Reserve(p *peer, max int) *fetchRequest { Time: time.Now(), } q.pendPool[p.id] = request - q.pendCount += len(request.Hashes) return request } @@ -250,7 +257,6 @@ func (q *queue) Cancel(request *fetchRequest) { q.hashQueue.Push(hash, float32(index)) } delete(q.pendPool, request.Peer.id) - q.pendCount -= len(request.Hashes) } // Expire checks for in flight requests that exceeded a timeout allowance, @@ -266,7 +272,6 @@ func (q *queue) Expire(timeout time.Duration) []string { for hash, index := range request.Hashes { q.hashQueue.Push(hash, float32(index)) } - q.pendCount -= len(request.Hashes) peers = append(peers, id) } } @@ -289,9 +294,6 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) { } delete(q.pendPool, id) - // Mark all the hashes in the request as non-pending - q.pendCount -= len(request.Hashes) - // If no blocks were retrieved, mark them as unavailable for the origin peer if len(blocks) == 0 { for hash, _ := range request.Hashes { From 43901c92825389b694fb5488c520cf5122f022de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 7 May 2015 14:40:50 +0300 Subject: [PATCH 03/19] eth/downloader: fix priority queue reset, add throttling test --- Godeps/Godeps.json | 2 +- .../cookiejar.v2/collections/prque/prque.go | 2 +- .../collections/prque/prque_test.go | 43 ++++++++++++++--- .../cookiejar.v2/collections/prque/sstack.go | 7 +-- .../collections/prque/sstack_test.go | 30 +++++++++--- eth/downloader/downloader_test.go | 48 +++++++++++++++++++ 6 files changed, 111 insertions(+), 21 deletions(-) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index a5b27e76c5..012475c08d 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -100,7 +100,7 @@ }, { "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque", - "Rev": "cf5d8079df7c4501217638e1e3a6e43f94822548" + "Rev": "0b2e270613f5d7ba262a5749b9e32270131497a2" }, { "ImportPath": "gopkg.in/qml.v1/cdata", diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go index 8225e8c53b..a1009f3bec 100644 --- a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque.go @@ -71,5 +71,5 @@ func (p *Prque) Size() int { // Clears the contents of the priority queue. 
func (p *Prque) Reset() { - p.cont.Reset() + *p = *New() } diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go index 811c53c737..daba691e1b 100644 --- a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/prque_test.go @@ -61,16 +61,45 @@ func TestPrque(t *testing.T) { } func TestReset(t *testing.T) { - // Fill the queue with some random data + // Generate a batch of random data and a specific priority order size := 16 * blockSize - queue := New() + prio := rand.Perm(size) + data := make([]int, size) for i := 0; i < size; i++ { - queue.Push(rand.Int(), rand.Float32()) + data[i] = rand.Int() } - // Reset and ensure it's empty - queue.Reset() - if !queue.Empty() { - t.Errorf("priority queue not empty after reset: %v", queue) + queue := New() + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], float32(prio[i])) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[float32]int) + for i := 0; i < size; i++ { + dict[float32(prio[i])] = data[i] + } + // Pop out half the elements in priority order and verify them + prevPrio := float32(size + 1) + for i := 0; i < size/2; i++ { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + // Reset and ensure it's empty + queue.Reset() + if !queue.Empty() { + t.Errorf("priority queue not empty after reset: %v", queue) + } } } diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go index 55375a0910..c11347f9d6 100644 --- a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack.go @@ -88,7 +88,7 @@ func (s *sstack) Less(i, j int) bool { return s.blocks[i/blockSize][i%blockSize].priority > s.blocks[j/blockSize][j%blockSize].priority } -// Swapts two elements in the stack. Required by sort.Interface. +// Swaps two elements in the stack. Required by sort.Interface. func (s *sstack) Swap(i, j int) { ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize s.blocks[ib][io], s.blocks[jb][jo] = s.blocks[jb][jo], s.blocks[ib][io] @@ -96,8 +96,5 @@ func (s *sstack) Swap(i, j int) { // Resets the stack, effectively clearing its contents. 
func (s *sstack) Reset() { - s.size = 0 - s.offset = 0 - s.active = s.blocks[0] - s.capacity = blockSize + *s = *newSstack() } diff --git a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go index 7bdc08bf5d..bcb5b830bd 100644 --- a/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go +++ b/Godeps/_workspace/src/gopkg.in/karalabe/cookiejar.v2/collections/prque/sstack_test.go @@ -79,15 +79,31 @@ func TestSstackSort(t *testing.T) { } func TestSstackReset(t *testing.T) { - // Push some stuff onto the stack + // Create some initial data size := 16 * blockSize - stack := newSstack() + data := make([]*item, size) for i := 0; i < size; i++ { - stack.Push(&item{i, float32(i)}) + data[i] = &item{rand.Int(), rand.Float32()} } - // Clear and verify - stack.Reset() - if stack.Len() != 0 { - t.Errorf("stack not empty after reset: %v", stack) + stack := newSstack() + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, stack.Pop().(*item)) + } + } + // Reset and verify both pulled and stack contents + stack.Reset() + if stack.Len() != 0 { + t.Errorf("stack not empty after reset: %v", stack) + } + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + } } } diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 11834d7885..bd439d96a4 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -181,3 +181,51 @@ func TestTaking(t *testing.T) { t.Error("expected to take 1000, got", len(bs1)) } } + +func TestThrottling(t *testing.T) { + minDesiredPeerCount = 4 + blockTtl = 1 * time.Second + + targetBlocks := 4 * blockCacheLimit + hashes := createHashes(0, targetBlocks) + blocks := createBlocksFromHashes(hashes) + tester := newTester(t, hashes, blocks) + + tester.newPeer("peer1", big.NewInt(10000), hashes[0]) + tester.newPeer("peer2", big.NewInt(0), common.Hash{}) + tester.badBlocksPeer("peer3", big.NewInt(0), common.Hash{}) + tester.badBlocksPeer("peer4", big.NewInt(0), common.Hash{}) + + // Concurrently download and take the blocks + errc := make(chan error, 1) + go func() { + errc <- tester.sync("peer1", hashes[0]) + }() + + done := make(chan struct{}) + took := []*types.Block{} + go func() { + for { + select { + case <-done: + took = append(took, tester.downloader.TakeBlocks()...) + done <- struct{}{} + return + default: + took = append(took, tester.downloader.TakeBlocks()...) + } + } + }() + + // Synchronize the two threads and verify + err := <-errc + done <- struct{}{} + <-done + + if err != nil { + t.Fatalf("failed to synchronize blocks: %v", err) + } + if len(took) != targetBlocks { + t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks) + } +} From 3953bf0031b6b2a4302b333aa65fc8ccaa7d788c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 30 Apr 2015 15:06:05 +0300 Subject: [PATCH 04/19] p2p: limit the outbound dialing too --- p2p/server.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index 5e0c917fc4..5424b54632 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -27,6 +27,9 @@ const ( // 'added as peer'. 
maxAcceptConns = 50 + // Maximum number of concurrently dialing outbound connections. + maxDialingConns = 50 + // total timeout for encryption handshake and protocol // handshake in both directions. handshakeTimeout = 5 * time.Second @@ -401,7 +404,11 @@ func (srv *Server) dialLoop() { defer srv.loopWG.Done() defer refresh.Stop() - // TODO: maybe limit number of active dials + // Limit the number of concurrent dials + slots := make(chan struct{}, maxDialingConns) + for i := 0; i < maxDialingConns; i++ { + slots <- struct{}{} + } dial := func(dest *discover.Node) { // Don't dial nodes that would fail the checks in addPeer. // This is important because the connection handshake is a lot @@ -413,6 +420,9 @@ func (srv *Server) dialLoop() { if !ok || dialing[dest.ID] { return } + // Request a dial slot to prevent CPU exhaustion + <-slots + defer func() { slots <- struct{}{} }() dialing[dest.ID] = true srv.peerWG.Add(1) From 29fef349efd87dcca76b0593e6b68ca9f3ccf2cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 30 Apr 2015 15:21:09 +0300 Subject: [PATCH 05/19] p2p: fix a dialing race in the throttler --- p2p/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index 5424b54632..16768f920f 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -422,13 +422,13 @@ func (srv *Server) dialLoop() { } // Request a dial slot to prevent CPU exhaustion <-slots - defer func() { slots <- struct{}{} }() dialing[dest.ID] = true srv.peerWG.Add(1) go func() { srv.dialNode(dest) dialed <- dest + slots <- struct{}{} }() } From 2060bc8bac0e803c661e0c0b233284ce52630c1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 30 Apr 2015 17:12:23 +0300 Subject: [PATCH 06/19] p2p: fix dial throttling race condition --- p2p/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index 16768f920f..b7a92ce553 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -427,8 +427,8 @@ func (srv *Server) dialLoop() { srv.peerWG.Add(1) go func() { srv.dialNode(dest) - dialed <- dest slots <- struct{}{} + dialed <- dest }() } From af932177755f5f839ab29b16dc490d3e1bb3708d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 30 Apr 2015 17:39:08 +0300 Subject: [PATCH 07/19] p2p: reduce the concurrent handshakes to 10/10 in/out --- p2p/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/p2p/server.go b/p2p/server.go index b7a92ce553..164aaba37b 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -25,10 +25,10 @@ const ( // This is the maximum number of inbound connection // that are allowed to linger between 'accepted' and // 'added as peer'. - maxAcceptConns = 50 + maxAcceptConns = 10 // Maximum number of concurrently dialing outbound connections. - maxDialingConns = 50 + maxDialingConns = 10 // total timeout for encryption handshake and protocol // handshake in both directions. 
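
Note on the dial throttling added and tuned in patches 04-07 above: it is a plain counting semaphore built from a buffered channel. The channel is pre-filled with tokens, each dial takes a token before it starts and puts it back once the attempt is over, so at most N dials and their handshakes run at once. The standalone Go sketch below illustrates only that pattern; the names dialOne and maxDialing are placeholders and nothing in it is taken from the p2p package itself.

package main

import (
	"fmt"
	"sync"
	"time"
)

// maxDialing caps the number of concurrently dialing connections, playing the
// same role as maxDialingConns in the patches above (assumed value).
const maxDialing = 10

// dialOne stands in for the expensive dial and handshake work.
func dialOne(id int) {
	time.Sleep(10 * time.Millisecond)
	fmt.Println("dialed", id)
}

func main() {
	// Pre-fill the semaphore with one token per allowed concurrent dial.
	slots := make(chan struct{}, maxDialing)
	for i := 0; i < maxDialing; i++ {
		slots <- struct{}{}
	}
	var wg sync.WaitGroup
	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-slots                                // acquire a dial slot; blocks while maxDialing dials are in flight
			defer func() { slots <- struct{}{} }() // return the slot once this attempt finishes
			dialOne(id)
		}(i)
	}
	wg.Wait()
}

Patches 05 and 06 are about exactly where the token is handed back: returning it before the dial has actually completed defeats the limit, which appears to be the race those two fixes address.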
From 4d5a719f256d7dfbaab2cc9c632cd7996067508f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 4 May 2015 17:35:49 +0300 Subject: [PATCH 08/19] cmd, eth, p2p: introduce pending peer cli arg, add tests --- cmd/geth/main.go | 1 + cmd/mist/main.go | 1 + cmd/utils/flags.go | 6 +++ eth/backend.go | 26 ++++----- p2p/server.go | 25 ++++++--- p2p/server_test.go | 130 +++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 170 insertions(+), 19 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 92c3b7a905..284f78c3e5 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -242,6 +242,7 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Conso utils.JSpathFlag, utils.ListenPortFlag, utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, utils.EtherbaseFlag, utils.MinerThreadsFlag, utils.MiningEnabledFlag, diff --git a/cmd/mist/main.go b/cmd/mist/main.go index 1030d6ada1..9d92cc1754 100644 --- a/cmd/mist/main.go +++ b/cmd/mist/main.go @@ -75,6 +75,7 @@ func init() { utils.LogFileFlag, utils.LogLevelFlag, utils.MaxPeersFlag, + utils.MaxPendingPeersFlag, utils.MinerThreadsFlag, utils.NATFlag, utils.NodeKeyFileFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1fdc8dfe51..0a1d650d62 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -197,6 +197,11 @@ var ( Usage: "Maximum number of network peers (network disabled if set to 0)", Value: 16, } + MaxPendingPeersFlag = cli.IntFlag{ + Name: "maxpendpeers", + Usage: "Maximum number of pending connection attempts (defaults used if set to 0)", + Value: 0, + } ListenPortFlag = cli.IntFlag{ Name: "port", Usage: "Network listening port", @@ -286,6 +291,7 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config { AccountManager: GetAccountManager(ctx), VmDebug: ctx.GlobalBool(VMDebugFlag.Name), MaxPeers: ctx.GlobalInt(MaxPeersFlag.Name), + MaxPendingPeers: ctx.GlobalInt(MaxPendingPeersFlag.Name), Port: ctx.GlobalString(ListenPortFlag.Name), NAT: GetNAT(ctx), NatSpec: ctx.GlobalBool(NatspecEnabledFlag.Name), diff --git a/eth/backend.go b/eth/backend.go index 791336d75d..0f23cde2fd 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -60,8 +60,9 @@ type Config struct { VmDebug bool NatSpec bool - MaxPeers int - Port string + MaxPeers int + MaxPendingPeers int + Port string // Space-separated list of discovery node URLs BootNodes string @@ -280,16 +281,17 @@ func New(config *Config) (*Ethereum, error) { protocols = append(protocols, eth.whisper.Protocol()) } eth.net = &p2p.Server{ - PrivateKey: netprv, - Name: config.Name, - MaxPeers: config.MaxPeers, - Protocols: protocols, - NAT: config.NAT, - NoDial: !config.Dial, - BootstrapNodes: config.parseBootNodes(), - StaticNodes: config.parseNodes(staticNodes), - TrustedNodes: config.parseNodes(trustedNodes), - NodeDatabase: nodeDb, + PrivateKey: netprv, + Name: config.Name, + MaxPeers: config.MaxPeers, + MaxPendingPeers: config.MaxPendingPeers, + Protocols: protocols, + NAT: config.NAT, + NoDial: !config.Dial, + BootstrapNodes: config.parseBootNodes(), + StaticNodes: config.parseNodes(staticNodes), + TrustedNodes: config.parseNodes(trustedNodes), + NodeDatabase: nodeDb, } if len(config.Port) > 0 { eth.net.ListenAddr = ":" + config.Port diff --git a/p2p/server.go b/p2p/server.go index 164aaba37b..77f66f1673 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -22,9 +22,7 @@ const ( refreshPeersInterval = 30 * time.Second staticPeerCheckInterval = 15 * time.Second - // This is the maximum number of inbound 
connection - // that are allowed to linger between 'accepted' and - // 'added as peer'. + // Maximum number of concurrently handshaking inbound connections. maxAcceptConns = 10 // Maximum number of concurrently dialing outbound connections. @@ -55,6 +53,11 @@ type Server struct { // connected. It must be greater than zero. MaxPeers int + // MaxPendingPeers is the maximum number of peers that can be pending in the + // handshake phase, counted separately for inbound and outbound connections. + // Zero defaults to preset values. + MaxPendingPeers int + // Name sets the node name of this server. // Use common.MakeName to create a name that follows existing conventions. Name string @@ -334,8 +337,12 @@ func (srv *Server) listenLoop() { // This channel acts as a semaphore limiting // active inbound connections that are lingering pre-handshake. // If all slots are taken, no further connections are accepted. - slots := make(chan struct{}, maxAcceptConns) - for i := 0; i < maxAcceptConns; i++ { + tokens := maxAcceptConns + if srv.MaxPendingPeers > 0 { + tokens = srv.MaxPendingPeers + } + slots := make(chan struct{}, tokens) + for i := 0; i < tokens; i++ { slots <- struct{}{} } @@ -405,8 +412,12 @@ func (srv *Server) dialLoop() { defer refresh.Stop() // Limit the number of concurrent dials - slots := make(chan struct{}, maxDialingConns) - for i := 0; i < maxDialingConns; i++ { + tokens := maxAcceptConns + if srv.MaxPendingPeers > 0 { + tokens = srv.MaxPendingPeers + } + slots := make(chan struct{}, tokens) + for i := 0; i < tokens; i++ { slots <- struct{}{} } dial := func(dest *discover.Node) { diff --git a/p2p/server_test.go b/p2p/server_test.go index 3f9db343cc..85a5a98cd5 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -369,6 +369,136 @@ func TestServerTrustedPeers(t *testing.T) { } } +// Tests that a failed dial will temporarily throttle a peer. 
+func TestServerMaxPendingDials(t *testing.T) { + defer testlog(t).detach() + + // Start a simple test server + server := &Server{ + ListenAddr: "127.0.0.1:0", + PrivateKey: newkey(), + MaxPeers: 10, + MaxPendingPeers: 1, + } + if err := server.Start(); err != nil { + t.Fatal("failed to start test server: %v", err) + } + defer server.Stop() + + // Simulate two separate remote peers + peers := make(chan *discover.Node, 2) + conns := make(chan net.Conn, 2) + for i := 0; i < 2; i++ { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("listener %d: failed to setup: %v", i, err) + } + defer listener.Close() + + addr := listener.Addr().(*net.TCPAddr) + peers <- &discover.Node{ + ID: discover.PubkeyID(&newkey().PublicKey), + IP: addr.IP, + TCP: uint16(addr.Port), + } + go func() { + conn, err := listener.Accept() + if err == nil { + conns <- conn + } + }() + } + // Request a dial for both peers + go func() { + for i := 0; i < 2; i++ { + server.staticDial <- <-peers // hack piggybacking the static implementation + } + }() + + // Make sure only one outbound connection goes through + var conn net.Conn + + select { + case conn = <-conns: + case <-time.After(100 * time.Millisecond): + t.Fatalf("first dial timeout") + } + select { + case conn = <-conns: + t.Fatalf("second dial completed prematurely") + case <-time.After(100 * time.Millisecond): + } + // Finish the first dial, check the second + conn.Close() + select { + case conn = <-conns: + conn.Close() + + case <-time.After(100 * time.Millisecond): + t.Fatalf("second dial timeout") + } +} + +func TestServerMaxPendingAccepts(t *testing.T) { + defer testlog(t).detach() + + // Start a test server and a peer sink for synchronization + started := make(chan *Peer) + server := &Server{ + ListenAddr: "127.0.0.1:0", + PrivateKey: newkey(), + MaxPeers: 10, + MaxPendingPeers: 1, + NoDial: true, + newPeerHook: func(p *Peer) { started <- p }, + } + if err := server.Start(); err != nil { + t.Fatal("failed to start test server: %v", err) + } + defer server.Stop() + + // Try and connect to the server on multiple threads concurrently + conns := make([]net.Conn, 2) + for i := 0; i < 2; i++ { + dialer := &net.Dialer{Deadline: time.Now().Add(3 * time.Second)} + + conn, err := dialer.Dial("tcp", server.ListenAddr) + if err != nil { + t.Fatalf("failed to dial server: %v", err) + } + conns[i] = conn + } + // Check that a handshake on the second doesn't pass + go func() { + key := newkey() + shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} + if _, err := setupConn(conns[1], key, shake, server.Self(), false, server.trustedNodes); err != nil { + t.Fatalf("failed to run handshake: %v", err) + } + }() + select { + case <-started: + t.Fatalf("handshake on second connection accepted") + + case <-time.After(100 * time.Millisecond): + } + // Shake on first, check that both go through + go func() { + key := newkey() + shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} + if _, err := setupConn(conns[0], key, shake, server.Self(), false, server.trustedNodes); err != nil { + t.Fatalf("failed to run handshake: %v", err) + } + }() + for i := 0; i < 2; i++ { + select { + case <-started: + case <-time.After(100 * time.Millisecond): + t.Fatalf("peer %d: handshake timeout", i) + } + } +} + func newkey() *ecdsa.PrivateKey { key, err := crypto.GenerateKey() if err != nil { From 8735e5addda74882da66deab8cf038be1fb3ed3f Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Mon, 4 May 2015 17:44:46 +0300 Subject: [PATCH 09/19] p2p: increase the handshake timeout in the tests --- p2p/server_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/p2p/server_test.go b/p2p/server_test.go index 85a5a98cd5..a5e56fa186 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -480,7 +480,7 @@ func TestServerMaxPendingAccepts(t *testing.T) { case <-started: t.Fatalf("handshake on second connection accepted") - case <-time.After(100 * time.Millisecond): + case <-time.After(time.Second): } // Shake on first, check that both go through go func() { @@ -493,7 +493,7 @@ func TestServerMaxPendingAccepts(t *testing.T) { for i := 0; i < 2; i++ { select { case <-started: - case <-time.After(100 * time.Millisecond): + case <-time.After(time.Second): t.Fatalf("peer %d: handshake timeout", i) } } From 9d188f73b58ee1fe4bda00a9536bda4056755f2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 7 May 2015 21:07:20 +0300 Subject: [PATCH 10/19] eth, eth/downloader: make synchronize thread safe --- eth/downloader/downloader.go | 72 +++++++------------------------ eth/downloader/downloader_test.go | 2 +- eth/downloader/queue.go | 10 ----- eth/handler.go | 4 +- eth/sync.go | 16 ++----- 5 files changed, 22 insertions(+), 82 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 25b2511127..ef2a193ffa 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -68,8 +68,7 @@ type Downloader struct { getBlock getBlockFn // Status - fetchingHashes int32 - downloadingBlocks int32 + synchronizing int32 // Channels newPeerCh chan *peer @@ -120,43 +119,26 @@ func (d *Downloader) UnregisterPeer(id string) { delete(d.peers, id) } -// SynchroniseWithPeer will select the peer and use it for synchronizing. If an empty string is given +// Synchronize will select the peer and use it for synchronizing. If an empty string is given // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) Synchronise(id string, hash common.Hash) error { - // Make sure it's doing neither. Once done we can restart the - // downloading process if the TD is higher. For now just get on - // with whatever is going on. This prevents unnecessary switching. - if d.isBusy() { - return errBusy +func (d *Downloader) Synchronize(id string, hash common.Hash) error { + // Make sure only one goroutine is ever allowed past this point at once + if !atomic.CompareAndSwapInt32(&d.synchronizing, 0, 1) { + return nil } + defer atomic.StoreInt32(&d.synchronizing, 0) - // When a synchronization attempt is made while the queue still - // contains items we abort the sync attempt - if done, pend := d.queue.Size(); done+pend > 0 { + // Abort if the queue still contains some leftover data + if _, cached := d.queue.Size(); cached > 0 { return errPendingQueue } - - // Fetch the peer using the id or throw an error if the peer couldn't be found + // Retrieve the origin peer and initiate the downloading process p := d.peers[id] if p == nil { return errUnknownPeer } - - // Get the hash from the peer and initiate the downloading progress. - err := d.getFromPeer(p, hash, false) - if err != nil { - return err - } - - return nil -} - -// Done lets the downloader know that whatever previous hashes were taken -// are processed. 
If the block count reaches zero and done is called -// we reset the queue for the next batch of incoming hashes and blocks. -func (d *Downloader) Done() { - d.queue.Done() + return d.getFromPeer(p, hash, false) } // TakeBlocks takes blocks from the queue and yields them to the blockTaker handler @@ -176,6 +158,7 @@ func (d *Downloader) Has(hash common.Hash) bool { } func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) (err error) { + d.activePeer = p.id defer func() { // reset on error @@ -184,7 +167,7 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) } }() - glog.V(logger.Detail).Infoln("Synchronising with the network using:", p.id) + glog.V(logger.Debug).Infoln("Synchronizing with the network using:", p.id) // Start the fetcher. This will block the update entirely // interupts need to be send to the appropriate channels // respectively. @@ -200,20 +183,13 @@ func (d *Downloader) getFromPeer(p *peer, hash common.Hash, ignoreInitial bool) return err } - glog.V(logger.Detail).Infoln("Sync completed") + glog.V(logger.Debug).Infoln("Synchronization completed") return nil } // XXX Make synchronous func (d *Downloader) startFetchingHashes(p *peer, h common.Hash, ignoreInitial bool) error { - atomic.StoreInt32(&d.fetchingHashes, 1) - defer atomic.StoreInt32(&d.fetchingHashes, 0) - - if d.queue.Has(h) { // TODO: Is this possible? Shouldn't queue be empty for startFetchingHashes to be even called? - return errAlreadyInPool - } - glog.V(logger.Debug).Infof("Downloading hashes (%x) from %s", h[:4], p.id) start := time.Now() @@ -312,10 +288,8 @@ out: } func (d *Downloader) startFetchingBlocks(p *peer) error { - glog.V(logger.Detail).Infoln("Downloading", d.queue.Pending(), "block(s)") + glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)") - atomic.StoreInt32(&d.downloadingBlocks, 1) - defer atomic.StoreInt32(&d.downloadingBlocks, 0) // Defer the peer reset. This will empty the peer requested set // and makes sure there are no lingering peers with an incorrect // state @@ -439,19 +413,3 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error { return nil } - -func (d *Downloader) isFetchingHashes() bool { - return atomic.LoadInt32(&d.fetchingHashes) == 1 -} - -func (d *Downloader) isDownloadingBlocks() bool { - return atomic.LoadInt32(&d.downloadingBlocks) == 1 -} - -func (d *Downloader) isBusy() bool { - return d.isFetchingHashes() || d.isDownloadingBlocks() -} - -func (d *Downloader) IsBusy() bool { - return d.isBusy() -} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index bd439d96a4..f3402794b8 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -61,7 +61,7 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types func (dl *downloadTester) sync(peerId string, hash common.Hash) error { dl.activePeerId = peerId - return dl.downloader.Synchronise(peerId, hash) + return dl.downloader.Synchronize(peerId, hash) } func (dl *downloadTester) hasBlock(hash common.Hash) bool { diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index d849d4d68a..515440bca6 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -63,16 +63,6 @@ func (q *queue) Reset() { q.blockCache = nil } -// Done checks if all the downloads have been retrieved, wiping the queue. 
-func (q *queue) Done() { - q.lock.Lock() - defer q.lock.Unlock() - - if len(q.blockCache) == 0 { - q.Reset() - } -} - // Size retrieves the number of hashes in the queue, returning separately for // pending and already downloaded. func (q *queue) Size() (int, int) { diff --git a/eth/handler.go b/eth/handler.go index 1e06638166..b2018f3368 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -307,7 +307,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { // Attempt to insert the newly received by checking if the parent exists. // if the parent exists we process the block and propagate to our peers - // otherwise synchronise with the peer + // otherwise synchronize with the peer if self.chainman.HasBlock(request.Block.ParentHash()) { if _, err := self.chainman.InsertChain(types.Blocks{request.Block}); err != nil { glog.V(logger.Error).Infoln("removed peer (", p.id, ") due to block error") @@ -324,7 +324,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { } self.BroadcastBlock(hash, request.Block) } else { - go self.synchronise(p) + go self.synchronize(p) } default: return errResp(ErrInvalidMsgCode, "%v", msg.Code) diff --git a/eth/sync.go b/eth/sync.go index 9e8b21a7cf..b259c1d475 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -32,14 +32,14 @@ func (pm *ProtocolManager) update() { } itimer.Stop() - go pm.synchronise(peer) + go pm.synchronize(peer) case <-itimer.C: // The timer will make sure that the downloader keeps an active state // in which it attempts to always check the network for highest td peers // Either select the peer or restart the timer if no peers could // be selected. if peer := getBestPeer(pm.peers); peer != nil { - go pm.synchronise(peer) + go pm.synchronize(peer) } else { itimer.Reset(5 * time.Second) } @@ -63,7 +63,6 @@ func (pm *ProtocolManager) processBlocks() error { if len(blocks) == 0 { return nil } - defer pm.downloader.Done() glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number()) @@ -78,26 +77,19 @@ func (pm *ProtocolManager) processBlocks() error { return nil } -func (pm *ProtocolManager) synchronise(peer *peer) { +func (pm *ProtocolManager) synchronize(peer *peer) { // Make sure the peer's TD is higher than our own. If not drop. if peer.td.Cmp(pm.chainman.Td()) <= 0 { return } - // Check downloader if it's busy so it doesn't show the sync message - // for every attempty - if pm.downloader.IsBusy() { - return - } - // FIXME if we have the hash in our chain and the TD of the peer is // much higher than ours, something is wrong with us or the peer. 
// Check if the hash is on our own chain if pm.chainman.HasBlock(peer.recentHash) { return } - // Get the hashes from the peer (synchronously) - err := pm.downloader.Synchronise(peer.id, peer.recentHash) + err := pm.downloader.Synchronize(peer.id, peer.recentHash) if err != nil && err == downloader.ErrBadPeer { glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action") pm.removePeer(peer) From 1de1359e3b99785ea94f7d75c8aa0dac6b034f24 Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Fri, 8 May 2015 12:27:35 +0200 Subject: [PATCH 11/19] Otto.ToValue concurrency error fixed --- jsre/jsre.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/jsre/jsre.go b/jsre/jsre.go index 7549b5e1e7..7de41428bd 100644 --- a/jsre/jsre.go +++ b/jsre/jsre.go @@ -286,7 +286,7 @@ func (self *JSRE) loadScript(call otto.FunctionCall) otto.Value { // uses the "prettyPrint" JS function to format a value func (self *JSRE) PrettyPrint(v interface{}) (val otto.Value, err error) { var method otto.Value - v, err = self.vm.ToValue(v) + v, err = self.ToValue(v) if err != nil { return } @@ -298,8 +298,22 @@ func (self *JSRE) PrettyPrint(v interface{}) (val otto.Value, err error) { } // creates an otto value from a go type +func (self *JSRE) ToValue(v interface{}) (otto.Value, error) { + done := make(chan bool) + req := &evalReq{ + fn: func(res *evalResult) { + res.result, res.err = self.vm.ToValue(v) + }, + done: done, + } + self.evalQueue <- req + <-done + return req.res.result, req.res.err +} + func (self *JSRE) ToVal(v interface{}) otto.Value { - result, err := self.vm.ToValue(v) + + result, err := self.ToValue(v) if err != nil { fmt.Println("Value unknown:", err) return otto.UndefinedValue() From d7dabce73291d2bb768625c7e465f2f645980dbf Mon Sep 17 00:00:00 2001 From: zsfelfoldi Date: Fri, 8 May 2015 13:29:48 +0200 Subject: [PATCH 12/19] ToVal serialization fix --- jsre/jsre.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/jsre/jsre.go b/jsre/jsre.go index 7de41428bd..adf2f8121f 100644 --- a/jsre/jsre.go +++ b/jsre/jsre.go @@ -297,7 +297,7 @@ func (self *JSRE) PrettyPrint(v interface{}) (val otto.Value, err error) { return method.Call(method, v) } -// creates an otto value from a go type +// creates an otto value from a go type (serialized version) func (self *JSRE) ToValue(v interface{}) (otto.Value, error) { done := make(chan bool) req := &evalReq{ @@ -311,9 +311,10 @@ func (self *JSRE) ToValue(v interface{}) (otto.Value, error) { return req.res.result, req.res.err } +// creates an otto value from a go type (non-serialized version) func (self *JSRE) ToVal(v interface{}) otto.Value { - result, err := self.ToValue(v) + result, err := self.vm.ToValue(v) if err != nil { fmt.Println("Value unknown:", err) return otto.UndefinedValue() From bd5720f4804788d91154a10ef5bb10425c502658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 8 May 2015 15:22:48 +0300 Subject: [PATCH 13/19] eth, eth/downloader: handle sync errors a bit more gracefully --- eth/downloader/downloader.go | 28 ++++++++-------- eth/downloader/downloader_test.go | 6 ++-- eth/handler.go | 8 ++--- eth/sync.go | 54 ++++++++++++++++--------------- 4 files changed, 48 insertions(+), 48 deletions(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index ef2a193ffa..a97cce1ef6 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -24,12 +24,12 @@ var ( blockTtl = 20 * time.Second // The amount of time it takes for 
a block request to time out errLowTd = errors.New("peer's TD is too low") - errBusy = errors.New("busy") + ErrBusy = errors.New("busy") errUnknownPeer = errors.New("peer's unknown or unhealthy") - ErrBadPeer = errors.New("action from bad peer ignored") + errBadPeer = errors.New("action from bad peer ignored") errNoPeers = errors.New("no peers to keep download active") errPendingQueue = errors.New("pending items in queue") - errTimeout = errors.New("timeout") + ErrTimeout = errors.New("timeout") errEmptyHashSet = errors.New("empty hash set by peer") errPeersUnavailable = errors.New("no peers available or all peers tried for block download process") errAlreadyInPool = errors.New("hash already in pool") @@ -68,7 +68,7 @@ type Downloader struct { getBlock getBlockFn // Status - synchronizing int32 + synchronising int32 // Channels newPeerCh chan *peer @@ -119,15 +119,15 @@ func (d *Downloader) UnregisterPeer(id string) { delete(d.peers, id) } -// Synchronize will select the peer and use it for synchronizing. If an empty string is given +// Synchronise will select the peer and use it for synchronising. If an empty string is given // it will use the best peer possible and synchronize if it's TD is higher than our own. If any of the // checks fail an error will be returned. This method is synchronous -func (d *Downloader) Synchronize(id string, hash common.Hash) error { +func (d *Downloader) Synchronise(id string, hash common.Hash) error { // Make sure only one goroutine is ever allowed past this point at once - if !atomic.CompareAndSwapInt32(&d.synchronizing, 0, 1) { - return nil + if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { + return ErrBusy } - defer atomic.StoreInt32(&d.synchronizing, 0) + defer atomic.StoreInt32(&d.synchronising, 0) // Abort if the queue still contains some leftover data if _, cached := d.queue.Size(); cached > 0 { @@ -272,7 +272,7 @@ out: // the zero hash. if p == nil || (hash == common.Hash{}) { d.queue.Reset() - return errTimeout + return ErrTimeout } // set p to the active peer. 
this will invalidate any hashes that may be returned @@ -282,7 +282,7 @@ out: glog.V(logger.Debug).Infof("Hash fetching switched to new peer(%s)\n", p.id) } } - glog.V(logger.Detail).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start)) + glog.V(logger.Debug).Infof("Downloaded hashes (%d) in %v\n", d.queue.Pending(), time.Since(start)) return nil } @@ -384,7 +384,6 @@ out: } } } - glog.V(logger.Detail).Infoln("Downloaded block(s) in", time.Since(start)) return nil @@ -404,11 +403,10 @@ func (d *Downloader) AddHashes(id string, hashes []common.Hash) error { return fmt.Errorf("received hashes from %s while active peer is %s", id, d.activePeer) } - if glog.V(logger.Detail) && len(hashes) != 0 { + if glog.V(logger.Debug) && len(hashes) != 0 { from, to := hashes[0], hashes[len(hashes)-1] - glog.Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id) + glog.V(logger.Debug).Infof("adding %d (T=%d) hashes [ %x / %x ] from: %s\n", len(hashes), d.queue.Pending(), from[:4], to[:4], id) } - d.hashCh <- hashPack{id, hashes} return nil diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index f3402794b8..8ccc4d1a5d 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -61,7 +61,7 @@ func newTester(t *testing.T, hashes []common.Hash, blocks map[common.Hash]*types func (dl *downloadTester) sync(peerId string, hash common.Hash) error { dl.activePeerId = peerId - return dl.downloader.Synchronize(peerId, hash) + return dl.downloader.Synchronise(peerId, hash) } func (dl *downloadTester) hasBlock(hash common.Hash) bool { @@ -217,13 +217,13 @@ func TestThrottling(t *testing.T) { } }() - // Synchronize the two threads and verify + // Synchronise the two threads and verify err := <-errc done <- struct{}{} <-done if err != nil { - t.Fatalf("failed to synchronize blocks: %v", err) + t.Fatalf("failed to synchronise blocks: %v", err) } if len(took) != targetBlocks { t.Fatalf("downloaded block mismatch: have %v, want %v", len(took), targetBlocks) diff --git a/eth/handler.go b/eth/handler.go index b2018f3368..41b6728d96 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -19,9 +19,9 @@ import ( ) const ( - peerCountTimeout = 12 * time.Second // Amount of time it takes for the peer handler to ignore minDesiredPeerCount - blockProcTimer = 500 * time.Millisecond - minDesiredPeerCount = 5 // Amount of peers desired to start syncing + forceSyncCycle = 10 * time.Second // Time interval to force syncs, even if few peers are available + blockProcCycle = 500 * time.Millisecond // Time interval to check for new blocks to process + minDesiredPeerCount = 5 // Amount of peers desired to start syncing blockProcAmount = 256 ) @@ -324,7 +324,7 @@ func (self *ProtocolManager) handleMsg(p *peer) error { } self.BroadcastBlock(hash, request.Block) } else { - go self.synchronize(p) + go self.synchronise(p) } default: return errResp(ErrInvalidMsgCode, "%v", msg.Code) diff --git a/eth/sync.go b/eth/sync.go index b259c1d475..c49f5209d2 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -12,10 +12,8 @@ import ( // Sync contains all synchronisation code for the eth protocol func (pm *ProtocolManager) update() { - // itimer is used to determine when to start ignoring `minDesiredPeerCount` - itimer := time.NewTimer(peerCountTimeout) - // btimer is used for picking of blocks from the downloader - btimer := time.Tick(blockProcTimer) + forceSync := time.Tick(forceSyncCycle) + blockProc := 
time.Tick(blockProcCycle) for { select { @@ -24,27 +22,22 @@ func (pm *ProtocolManager) update() { if len(pm.peers) < minDesiredPeerCount { break } - - // Find the best peer + // Find the best peer and synchronise with it peer := getBestPeer(pm.peers) if peer == nil { - glog.V(logger.Debug).Infoln("Sync attempt cancelled. No peers available") + glog.V(logger.Debug).Infoln("Sync attempt canceled. No peers available") } + go pm.synchronise(peer) - itimer.Stop() - go pm.synchronize(peer) - case <-itimer.C: - // The timer will make sure that the downloader keeps an active state - // in which it attempts to always check the network for highest td peers - // Either select the peer or restart the timer if no peers could - // be selected. + case <-forceSync: + // Force a sync even if not enough peers are present if peer := getBestPeer(pm.peers); peer != nil { - go pm.synchronize(peer) - } else { - itimer.Reset(5 * time.Second) + go pm.synchronise(peer) } - case <-btimer: + case <-blockProc: + // Try to pull some blocks from the downloaded go pm.processBlocks() + case <-pm.quitSync: return } @@ -59,11 +52,11 @@ func (pm *ProtocolManager) processBlocks() error { pm.wg.Add(1) defer pm.wg.Done() + // Take a batch of blocks (will return nil if a previous batch has not reached the chain yet) blocks := pm.downloader.TakeBlocks() if len(blocks) == 0 { return nil } - glog.V(logger.Debug).Infof("Inserting chain with %d blocks (#%v - #%v)\n", len(blocks), blocks[0].Number(), blocks[len(blocks)-1].Number()) for len(blocks) != 0 && !pm.quit { @@ -77,7 +70,7 @@ func (pm *ProtocolManager) processBlocks() error { return nil } -func (pm *ProtocolManager) synchronize(peer *peer) { +func (pm *ProtocolManager) synchronise(peer *peer) { // Make sure the peer's TD is higher than our own. If not drop. if peer.td.Cmp(pm.chainman.Td()) <= 0 { return @@ -89,12 +82,21 @@ func (pm *ProtocolManager) synchronize(peer *peer) { return } // Get the hashes from the peer (synchronously) - err := pm.downloader.Synchronize(peer.id, peer.recentHash) - if err != nil && err == downloader.ErrBadPeer { - glog.V(logger.Debug).Infoln("removed peer from peer set due to bad action") + glog.V(logger.Debug).Infof("Attempting synchronisation: %v, 0x%x", peer.id, peer.recentHash) + + err := pm.downloader.Synchronise(peer.id, peer.recentHash) + switch err { + case nil: + glog.V(logger.Debug).Infof("Synchronisation completed") + + case downloader.ErrBusy: + glog.V(logger.Debug).Infof("Synchronisation already in progress") + + case downloader.ErrTimeout: + glog.V(logger.Debug).Infof("Removing peer %v due to sync timeout", peer.id) pm.removePeer(peer) - } else if err != nil { - // handle error - glog.V(logger.Detail).Infoln("error downloading:", err) + + default: + glog.V(logger.Warn).Infof("Synchronisation failed: %v", err) } } From 914e57e49bea0617515e1935972c5990a222cd7b Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 8 May 2015 15:54:35 +0200 Subject: [PATCH 14/19] p2p: fix disconnect at capacity With the introduction of static/trusted nodes, the peer count can go above MaxPeers. Update the capacity check to handle this. While here, decouple the trusted nodes check from the handshake by passing a function instead. 
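
The refactor described here replaces the atcap flag and trusted-node map, which the caller snapshotted before the handshake, with a keepconn callback that the handshake code invokes once the remote identity is known, so the keep-or-drop decision is made against current server state. A rough standalone Go sketch of that shape follows; the NodeID type, setupConn signature and peer bookkeeping are simplified stand-ins, not the real p2p API.

package main

import "fmt"

type NodeID string

// setupConn stands in for the handshake: the remote identity only becomes known
// part-way through, so the keep-or-drop decision is passed in as a callback and
// evaluated at that point instead of being precomputed by the caller.
func setupConn(remote NodeID, keepconn func(NodeID) bool) error {
	// ... encryption handshake would run here and yield `remote` ...
	if !keepconn(remote) {
		return fmt.Errorf("too many peers, dropping %s", remote)
	}
	return nil
}

func main() {
	peers := map[NodeID]bool{"a": true, "b": true} // currently connected peers
	trusted := map[NodeID]bool{"t": true}          // always-allowed nodes
	maxPeers := 2

	keepconn := func(id NodeID) bool {
		if trusted[id] {
			return true // trusted (or static) nodes bypass the capacity check
		}
		return len(peers) < maxPeers
	}

	fmt.Println(setupConn("t", keepconn)) // kept: trusted, even though at capacity
	fmt.Println(setupConn("x", keepconn)) // dropped: at capacity and not trusted
}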
--- p2p/handshake.go | 24 ++++++++++++------------ p2p/handshake_test.go | 5 +++-- p2p/server.go | 29 +++++++++++++++++------------ p2p/server_test.go | 21 ++++++++++++++------- 4 files changed, 46 insertions(+), 33 deletions(-) diff --git a/p2p/handshake.go b/p2p/handshake.go index 8e611cfd57..4cdcee6d4d 100644 --- a/p2p/handshake.go +++ b/p2p/handshake.go @@ -65,26 +65,26 @@ type protoHandshake struct { ID discover.NodeID } -// setupConn starts a protocol session on the given connection. -// It runs the encryption handshake and the protocol handshake. -// If dial is non-nil, the connection the local node is the initiator. -// If atcap is true, the connection will be disconnected with DiscTooManyPeers -// after the key exchange. -func setupConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) { +// setupConn starts a protocol session on the given connection. It +// runs the encryption handshake and the protocol handshake. If dial +// is non-nil, the connection the local node is the initiator. If +// keepconn returns false, the connection will be disconnected with +// DiscTooManyPeers after the key exchange. +func setupConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) { if dial == nil { - return setupInboundConn(fd, prv, our, atcap, trusted) + return setupInboundConn(fd, prv, our, keepconn) } else { - return setupOutboundConn(fd, prv, our, dial, atcap, trusted) + return setupOutboundConn(fd, prv, our, dial, keepconn) } } -func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) { +func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, keepconn func(discover.NodeID) bool) (*conn, error) { secrets, err := receiverEncHandshake(fd, prv, nil) if err != nil { return nil, fmt.Errorf("encryption handshake failed: %v", err) } rw := newRlpxFrameRW(fd, secrets) - if atcap && !trusted[secrets.RemoteID] { + if !keepconn(secrets.RemoteID) { SendItems(rw, discMsg, DiscTooManyPeers) return nil, errors.New("we have too many peers") } @@ -99,13 +99,13 @@ func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, a return &conn{rw, rhs}, nil } -func setupOutboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) { +func setupOutboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) { secrets, err := initiatorEncHandshake(fd, prv, dial.ID, nil) if err != nil { return nil, fmt.Errorf("encryption handshake failed: %v", err) } rw := newRlpxFrameRW(fd, secrets) - if atcap && !trusted[secrets.RemoteID] { + if !keepconn(secrets.RemoteID) { SendItems(rw, discMsg, DiscTooManyPeers) return nil, errors.New("we have too many peers") } diff --git a/p2p/handshake_test.go b/p2p/handshake_test.go index 9018e14f20..ab75921a36 100644 --- a/p2p/handshake_test.go +++ b/p2p/handshake_test.go @@ -141,9 +141,10 @@ func TestSetupConn(t *testing.T) { fd0, fd1 := net.Pipe() done := make(chan struct{}) + keepalways := func(discover.NodeID) bool { return true } go func() { defer close(done) - conn0, err := setupConn(fd0, prv0, hs0, node1, false, nil) + conn0, err := setupConn(fd0, prv0, hs0, node1, keepalways) if err != nil { t.Errorf("outbound side error: %v", 
err) return @@ -156,7 +157,7 @@ func TestSetupConn(t *testing.T) { } }() - conn1, err := setupConn(fd1, prv1, hs1, nil, false, nil) + conn1, err := setupConn(fd1, prv1, hs1, nil, keepalways) if err != nil { t.Fatalf("inbound side error: %v", err) } diff --git a/p2p/server.go b/p2p/server.go index 77f66f1673..0598547e4a 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -126,7 +126,7 @@ type Server struct { peerWG sync.WaitGroup // active peer goroutines } -type setupFunc func(net.Conn, *ecdsa.PrivateKey, *protoHandshake, *discover.Node, bool, map[discover.NodeID]bool) (*conn, error) +type setupFunc func(net.Conn, *ecdsa.PrivateKey, *protoHandshake, *discover.Node, func(discover.NodeID) bool) (*conn, error) type newPeerHook func(*Peer) // Peers returns all connected peers. @@ -506,17 +506,7 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) { // the callers of startPeer added the peer to the wait group already. fd.SetDeadline(time.Now().Add(handshakeTimeout)) - // Check capacity, but override for static nodes - srv.lock.RLock() - atcap := len(srv.peers) == srv.MaxPeers - if dest != nil { - if _, ok := srv.staticNodes[dest.ID]; ok { - atcap = false - } - } - srv.lock.RUnlock() - - conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, atcap, srv.trustedNodes) + conn, err := srv.setupFunc(fd, srv.PrivateKey, srv.ourHandshake, dest, srv.keepconn) if err != nil { fd.Close() glog.V(logger.Debug).Infof("Handshake with %v failed: %v", fd.RemoteAddr(), err) @@ -539,6 +529,21 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) { go srv.runPeer(p) } +// preflight checks whether a connection should be kept. it runs +// after the encryption handshake, as soon as the remote identity is +// known. +func (srv *Server) keepconn(id discover.NodeID) bool { + srv.lock.RLock() + defer srv.lock.RUnlock() + if _, ok := srv.staticNodes[id]; ok { + return true // static nodes are always allowed + } + if _, ok := srv.trustedNodes[id]; ok { + return true // trusted nodes are always allowed + } + return len(srv.peers) < srv.MaxPeers +} + func (srv *Server) runPeer(p *Peer) { glog.V(logger.Debug).Infof("Added %v\n", p) srvjslog.LogJson(&logger.P2PConnected{ diff --git a/p2p/server_test.go b/p2p/server_test.go index a5e56fa186..bf9df31abb 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -22,8 +22,11 @@ func startTestServer(t *testing.T, pf newPeerHook) *Server { ListenAddr: "127.0.0.1:0", PrivateKey: newkey(), newPeerHook: pf, - setupFunc: func(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, atcap bool, trusted map[discover.NodeID]bool) (*conn, error) { + setupFunc: func(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) { id := randomID() + if !keepconn(id) { + return nil, DiscAlreadyConnected + } rw := newRlpxFrameRW(fd, secrets{ MAC: zero16, AES: zero16, @@ -200,7 +203,7 @@ func TestServerDisconnectAtCap(t *testing.T) { // Run the handshakes just like a real peer would. 
key := newkey() hs := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} - _, err = setupConn(conn, key, hs, srv.Self(), false, srv.trustedNodes) + _, err = setupConn(conn, key, hs, srv.Self(), keepalways) if i == nconns-1 { // When handling the last connection, the server should // disconnect immediately instead of running the protocol @@ -250,7 +253,7 @@ func TestServerStaticPeers(t *testing.T) { // Run the handshakes just like a real peer would, and wait for completion key := newkey() shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} - if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil { + if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil { t.Fatalf("conn %d: unexpected error: %v", i, err) } <-started @@ -344,7 +347,7 @@ func TestServerTrustedPeers(t *testing.T) { // Run the handshakes just like a real peer would, and wait for completion key := newkey() shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} - if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil { + if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil { t.Fatalf("conn %d: unexpected error: %v", i, err) } <-started @@ -357,7 +360,7 @@ func TestServerTrustedPeers(t *testing.T) { defer conn.Close() shake := &protoHandshake{Version: baseProtocolVersion, ID: trusted.ID} - if _, err = setupConn(conn, key, shake, server.Self(), false, server.trustedNodes); err != nil { + if _, err = setupConn(conn, key, shake, server.Self(), keepalways); err != nil { t.Fatalf("trusted node: unexpected error: %v", err) } select { @@ -472,7 +475,7 @@ func TestServerMaxPendingAccepts(t *testing.T) { go func() { key := newkey() shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} - if _, err := setupConn(conns[1], key, shake, server.Self(), false, server.trustedNodes); err != nil { + if _, err := setupConn(conns[1], key, shake, server.Self(), keepalways); err != nil { t.Fatalf("failed to run handshake: %v", err) } }() @@ -486,7 +489,7 @@ func TestServerMaxPendingAccepts(t *testing.T) { go func() { key := newkey() shake := &protoHandshake{Version: baseProtocolVersion, ID: discover.PubkeyID(&key.PublicKey)} - if _, err := setupConn(conns[0], key, shake, server.Self(), false, server.trustedNodes); err != nil { + if _, err := setupConn(conns[0], key, shake, server.Self(), keepalways); err != nil { t.Fatalf("failed to run handshake: %v", err) } }() @@ -513,3 +516,7 @@ func randomID() (id discover.NodeID) { } return id } + +func keepalways(id discover.NodeID) bool { + return true +} From 9c0f36c46dd85f02c6c02cc646714b2576a70f27 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 8 May 2015 15:58:19 +0200 Subject: [PATCH 15/19] p2p: use maxDialingConns instead of maxAcceptConns as dial limit --- p2p/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/p2p/server.go b/p2p/server.go index 0598547e4a..171798a1df 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -412,7 +412,7 @@ func (srv *Server) dialLoop() { defer refresh.Stop() // Limit the number of concurrent dials - tokens := maxAcceptConns + tokens := maxDialingConns if srv.MaxPendingPeers > 0 { tokens = srv.MaxPendingPeers } From e45d9bb29d3c04d57fd40533b43ea7929b6a4513 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 8 May 2015 16:01:31 +0200 Subject: [PATCH 16/19] cmd/utils: 
bump default maxpeers to 25 This should improve ethereum block propagation times since we're not not broadcasting blocks to 100% of peers. --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a2ff05440f..b18d9851f8 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -195,7 +195,7 @@ var ( MaxPeersFlag = cli.IntFlag{ Name: "maxpeers", Usage: "Maximum number of network peers (network disabled if set to 0)", - Value: 16, + Value: 25, } MaxPendingPeersFlag = cli.IntFlag{ Name: "maxpendpeers", From d4f0a67323dec12e5b84ba4907970267a2e27601 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 8 May 2015 16:09:38 +0200 Subject: [PATCH 17/19] p2p: drop connections with no matching protocols --- p2p/peer.go | 12 ++++++++++++ p2p/server.go | 13 +++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/p2p/peer.go b/p2p/peer.go index cdf9ba9652..ac691f2ce8 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -211,6 +211,18 @@ func (p *Peer) handle(msg Msg) error { return nil } +func countMatchingProtocols(protocols []Protocol, caps []Cap) int { + n := 0 + for _, cap := range caps { + for _, proto := range protocols { + if proto.Name == cap.Name && proto.Version == cap.Version { + n++ + } + } + } + return n +} + // matchProtocols creates structures for matching named subprotocols. func matchProtocols(protocols []Protocol, caps []Cap, rw MsgReadWriter) map[string]*protoRW { sort.Sort(capsByName(caps)) diff --git a/p2p/server.go b/p2p/server.go index 171798a1df..3c6fb58939 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -518,7 +518,7 @@ func (srv *Server) startPeer(fd net.Conn, dest *discover.Node) { conn: fd, rtimeout: frameReadTimeout, wtimeout: frameWriteTimeout, } p := newPeer(fd, conn, srv.Protocols) - if ok, reason := srv.addPeer(conn.ID, p); !ok { + if ok, reason := srv.addPeer(conn, p); !ok { glog.V(logger.Detail).Infof("Not adding %v (%v)\n", p, reason) p.politeDisconnect(reason) srv.peerWG.Done() @@ -564,13 +564,18 @@ func (srv *Server) runPeer(p *Peer) { }) } -func (srv *Server) addPeer(id discover.NodeID, p *Peer) (bool, DiscReason) { +func (srv *Server) addPeer(conn *conn, p *Peer) (bool, DiscReason) { + // drop connections with no matching protocols. + if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, conn.protoHandshake.Caps) == 0 { + return false, DiscUselessPeer + } + // add the peer if it passes the other checks. 
srv.lock.Lock() defer srv.lock.Unlock() - if ok, reason := srv.checkPeer(id); !ok { + if ok, reason := srv.checkPeer(conn.ID); !ok { return false, reason } - srv.peers[id] = p + srv.peers[conn.ID] = p return true, 0 } From edad47bf0e68ad02ee0cb6efd776c9f9be67ad8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 8 May 2015 17:21:11 +0300 Subject: [PATCH 18/19] eth/downloader: fix leftover state between syncs --- eth/downloader/downloader.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index a97cce1ef6..18f8d2ba8d 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -130,9 +130,12 @@ func (d *Downloader) Synchronise(id string, hash common.Hash) error { defer atomic.StoreInt32(&d.synchronising, 0) // Abort if the queue still contains some leftover data - if _, cached := d.queue.Size(); cached > 0 { + if _, cached := d.queue.Size(); cached > 0 && d.queue.GetHeadBlock() != nil { return errPendingQueue } + // Reset the queue to clean any internal leftover state + d.queue.Reset() + // Retrieve the origin peer and initiate the downloading process p := d.peers[id] if p == nil { From c8fc4cebe63073fd77d5f553a4f0cec36a4ccb4b Mon Sep 17 00:00:00 2001 From: obscuren Date: Fri, 8 May 2015 17:24:41 +0200 Subject: [PATCH 19/19] version 0.9.17 --- cmd/geth/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index a5187bf766..fd6925e6db 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -51,7 +51,7 @@ import _ "net/http/pprof" const ( ClientIdentifier = "Geth" - Version = "0.9.16" + Version = "0.9.17" ) var (
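
Patch 18 above, together with the earlier synchronisation rework in patches 10 and 13, settles on a simple control-flow pattern: an atomic flag makes Synchronise single-flight, so overlapping callers get ErrBusy, and any leftover queue state from an aborted run is cleared before a new one starts. The sketch below shows only that pattern; the syncer type and its fields are illustrative and not the downloader's actual structure.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
)

var ErrBusy = errors.New("busy")

// syncer mimics the downloader's guard: only one synchronisation may run at a
// time, and leftover state is wiped before a new run begins.
type syncer struct {
	synchronising int32
	queue         []string // stand-in for the hash/block queue
}

func (s *syncer) Synchronise(peer string) error {
	// Make sure only one goroutine is ever allowed past this point at once.
	if !atomic.CompareAndSwapInt32(&s.synchronising, 0, 1) {
		return ErrBusy
	}
	defer atomic.StoreInt32(&s.synchronising, 0)

	// Reset any internal leftover state before starting the new run.
	s.queue = s.queue[:0]
	s.queue = append(s.queue, peer+": fetched")
	return nil
}

func main() {
	s := new(syncer)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Attempts that overlap with a running sync return ErrBusy.
			fmt.Printf("peer%d: %v\n", i, s.Synchronise(fmt.Sprintf("peer%d", i)))
		}(i)
	}
	wg.Wait()
}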