eth/downloader: cap the hash ban set, add test for it

pull/1153/head
Péter Szilágyi 10 years ago
parent 4b2dd44711
commit 63c6cedb14
  1. eth/downloader/downloader.go (21 changed lines)
  2. eth/downloader/downloader_test.go (43 changed lines)
  3. eth/downloader/peer.go (2 changed lines)
  4. eth/downloader/queue.go (2 changed lines)
  5. eth/handler.go (4 changed lines)
  6. eth/peer.go (2 changed lines)

eth/downloader/downloader.go
@@ -17,18 +17,17 @@ import (
 	"gopkg.in/fatih/set.v0"
 )
 
-const (
+var (
 	MinHashFetch  = 512  // Minimum amount of hashes to not consider a peer stalling
 	MaxHashFetch  = 2048 // Amount of hashes to be fetched per retrieval request
 	MaxBlockFetch = 128  // Amount of blocks to be fetched per retrieval request
 
 	hashTTL         = 5 * time.Second  // Time it takes for a hash request to time out
-)
-
-var (
 	blockSoftTTL    = 3 * time.Second  // Request completion threshold for increasing or decreasing a peer's bandwidth
 	blockHardTTL    = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired
 	crossCheckCycle = time.Second      // Period after which to check for expired cross checks
+
+	maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
 )
 
 var (
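
Promoting these limits from constants to variables is what lets the new memory exhaustion test below shrink MaxBlockFetch and maxBannedHashes to test-friendly values. It also turns them from untyped constants into typed int variables, which is why the later hunks in eth/handler.go, eth/peer.go and the downloader's peer.go gain explicit uint64(...) and float64(...) conversions. A minimal standalone sketch of that difference (hypothetical names, not part of the commit):

// Sketch: untyped constants convert implicitly, typed variables do not.
package main

import "math"

const maxConst = 2048 // untyped constant: adapts to whatever type it is compared against
var maxVar = 2048     // plain int variable: every mixed-type use needs an explicit cast

func main() {
	var amount uint64 = 5000

	if amount > maxConst { // fine: the untyped constant becomes a uint64 here
		amount = maxConst
	}
	if amount > uint64(maxVar) { // cast required, otherwise: invalid operation (mismatched types uint64 and int)
		amount = uint64(maxVar)
	}
	_ = math.Min(float64(maxVar), float64(amount)) // math.Min takes float64, so the variable needs converting too
}

That is the whole reason for the mechanical uint64/float64 churn in the remaining files of this commit.
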
@@ -602,9 +601,19 @@ func (d *Downloader) banBlocks(peerId string, head common.Hash) error {
 				}
 				index++
 			}
+			// Ban the head hash and phase out any excess
 			d.banned.Add(blocks[index].Hash())
-			glog.V(logger.Debug).Infof("Banned %d blocks from: %s\n", index+1, peerId)
-
+			for d.banned.Size() > maxBannedHashes {
+				d.banned.Each(func(item interface{}) bool {
+					// Skip any hard coded bans
+					if core.BadHashes[item.(common.Hash)] {
+						return true
+					}
+					d.banned.Remove(item)
+					return false
+				})
+			}
+			glog.V(logger.Debug).Infof("Banned %d blocks from: %s", index+1, peerId)
 			return nil
 		}
 	}
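
The loop just added keeps the ban set bounded: once it grows past maxBannedHashes, Each walks the set and removes the first entry that is not a hard coded bad hash (returning false stops the walk), repeating until the size is back under the cap, so the entries seeded from core.BadHashes are never evicted. A self-contained sketch of the same cap-and-evict idea using a plain map instead of gopkg.in/fatih/set.v0 (all names hypothetical):

// Sketch: a bounded ban set that never evicts the hard coded bad hashes.
package main

import "fmt"

type hash [32]byte

var hardcoded = map[hash]bool{{1}: true} // stand-in for core.BadHashes

const banCap = 4 // stand-in for maxBannedHashes, kept tiny for the demo

// addBan inserts h and then evicts arbitrary non-hardcoded entries, one per
// pass, until the set is back under the cap.
func addBan(banned map[hash]bool, h hash) {
	banned[h] = true
	for len(banned) > banCap {
		evicted := false
		for item := range banned {
			if hardcoded[item] {
				continue // never drop a hard coded ban
			}
			delete(banned, item)
			evicted = true
			break // one eviction per pass, like the Each callback returning false
		}
		if !evicted {
			break // only hard coded bans left, nothing else to evict
		}
	}
}

func main() {
	banned := map[hash]bool{{1}: true} // seed with the hard coded ban
	for i := byte(2); i < 20; i++ {
		addBan(banned, hash{i})
	}
	fmt.Println(len(banned), banned[hash{1}]) // prints "4 true": capped, hard coded ban intact
}

The evicted flag only guards the degenerate case where nothing but hard coded bans remains; in the downloader that cannot happen, since core.BadHashes is far smaller than maxBannedHashes.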

eth/downloader/downloader_test.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/event"
 )
@@ -559,3 +560,45 @@ func TestBannedChainStarvationAttack(t *testing.T) {
 		banned = bans
 	}
 }
+
+// Tests that if a peer sends excessively many/large invalid chains that are
+// gradually banned, it will have an upper limit on the consumed memory and also
+// the origin bad hashes will not be evacuated.
+func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
+	// Reduce the test size a bit
+	MaxBlockFetch = 4
+	maxBannedHashes = 256
+
+	// Construct a banned chain with more chunks than the ban limit
+	hashes := createHashes(0, maxBannedHashes*MaxBlockFetch)
+	hashes[len(hashes)-1] = bannedHash // weird index to have non multiple of ban chunk size
+
+	blocks := createBlocksFromHashes(hashes)
+
+	// Create the tester and ban the selected hash
+	tester := newTester(t, hashes, blocks)
+	tester.downloader.banned.Add(bannedHash)
+
+	// Iteratively try to sync, and verify that the banned hash list grows until
+	// the head of the invalid chain is blocked too.
+	tester.newPeer("attack", big.NewInt(10000), hashes[0])
+	for {
+		// Try to sync with the attacker, check hash chain failure
+		if _, err := tester.syncTake("attack", hashes[0]); err != ErrInvalidChain {
+			t.Fatalf("synchronisation error mismatch: have %v, want %v", err, ErrInvalidChain)
+		}
+		// Short circuit if the entire chain was banned
+		if tester.downloader.banned.Has(hashes[0]) {
+			break
+		}
+		// Otherwise ensure we never exceed the memory allowance and the hard coded bans are untouched
+		if bans := tester.downloader.banned.Size(); bans > maxBannedHashes {
+			t.Fatalf("ban cap exceeded: have %v, want max %v", bans, maxBannedHashes)
+		}
+		for hash, _ := range core.BadHashes {
+			if !tester.downloader.banned.Has(hash) {
+				t.Fatalf("hard coded ban evacuated: %x", hash)
+			}
+		}
+	}
+}

eth/downloader/peer.go
@@ -94,7 +94,7 @@ func (p *peer) SetIdle() {
 	for {
 		// Calculate the new download bandwidth allowance
 		prev := atomic.LoadInt32(&p.capacity)
-		next := int32(math.Max(1, math.Min(MaxBlockFetch, float64(prev)*scale)))
+		next := int32(math.Max(1, math.Min(float64(MaxBlockFetch), float64(prev)*scale)))
 
 		// Try to update the old value
 		if atomic.CompareAndSwapInt32(&p.capacity, prev, next) {
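
Beyond the new float64 cast, the surrounding SetIdle logic is a lock-free read-modify-CAS loop: load the current capacity, compute the scaled value clamped to [1, MaxBlockFetch], and retry if another goroutine swapped in a different value in the meantime. A standalone sketch of that pattern (hypothetical names, not from the commit):

// Sketch of a lock-free capacity update using compare-and-swap.
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

var maxFetch = 128 // stand-in for MaxBlockFetch

// scaleCapacity multiplies *capacity by scale, clamping the result to
// [1, maxFetch] and retrying until no concurrent update interferes.
func scaleCapacity(capacity *int32, scale float64) {
	for {
		prev := atomic.LoadInt32(capacity)
		next := int32(math.Max(1, math.Min(float64(maxFetch), float64(prev)*scale)))
		if atomic.CompareAndSwapInt32(capacity, prev, next) {
			return
		}
		// Lost a race with another updater: reload and try again.
	}
}

func main() {
	c := int32(16)
	scaleCapacity(&c, 2.0) // e.g. double the allowance after a fast reply
	fmt.Println(c)         // prints 32
}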

eth/downloader/queue.go
@@ -16,7 +16,7 @@ import (
 	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
 )
 
-const (
+var (
 	blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
 )

eth/handler.go
@@ -213,8 +213,8 @@ func (self *ProtocolManager) handleMsg(p *peer) error {
 			return errResp(ErrDecode, "->msg %v: %v", msg, err)
 		}
-		if request.Amount > downloader.MaxHashFetch {
-			request.Amount = downloader.MaxHashFetch
+		if request.Amount > uint64(downloader.MaxHashFetch) {
+			request.Amount = uint64(downloader.MaxHashFetch)
 		}
 		hashes := self.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)

eth/peer.go
@@ -102,7 +102,7 @@ func (p *peer) sendTransaction(tx *types.Transaction) error {
 
 func (p *peer) requestHashes(from common.Hash) error {
 	glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4])
-	return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, downloader.MaxHashFetch})
+	return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, uint64(downloader.MaxHashFetch)})
 }
 
 func (p *peer) requestBlocks(hashes []common.Hash) error {
