Merge pull request #1351 from karalabe/eth61

Implement eth/61
pull/1390/head
Jeffrey Wilcke 9 years ago
commit 5caff3bc24
Changed files:

  1. cmd/geth/main.go (3)
  2. cmd/utils/flags.go (6)
  3. eth/backend.go (62)
  4. eth/downloader/downloader.go (417)
  5. eth/downloader/downloader_test.go (293)
  6. eth/downloader/peer.go (26)
  7. eth/downloader/queue.go (28)
  8. eth/handler.go (188)
  9. eth/metrics.go (28)
  10. eth/peer.go (178)
  11. eth/protocol.go (38)
  12. eth/protocol_test.go (10)
  13. eth/sync.go (10)
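The heart of the change is the downloader: next to the old eth/60 reverse hash-fetching path, eth/61 peers get a forward sync that first locates a common ancestor and then runs the hash and block fetchers concurrently, cancelling both as soon as either fails (see the syncWithPeer and findAncestor hunks below). The following standalone Go sketch illustrates only that cancellation pattern; the fetcher functions are placeholders, not the downloader's API.

package main

import (
	"errors"
	"fmt"
)

func main() {
	cancel := make(chan struct{})

	// Stand-ins for the two concurrent fetchers (hypothetical, for illustration only).
	fetchHashes := func() error { return errors.New("peer timed out") }
	fetchBlocks := func() error { <-cancel; return nil } // blocks until cancelled

	errc := make(chan error, 2)
	go func() { errc <- fetchHashes() }()
	go func() { errc <- fetchBlocks() }()

	// If either fetcher fails, cancel the other and drain its result.
	if err := <-errc; err != nil {
		close(cancel)
		<-errc
		fmt.Println("sync aborted:", err)
		return
	}
	fmt.Println("sync done:", <-errc)
}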

@@ -277,7 +277,6 @@ JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
	utils.ExecFlag,
	utils.WhisperEnabledFlag,
	utils.VMDebugFlag,
-	utils.ProtocolVersionFlag,
	utils.NetworkIdFlag,
	utils.RPCCORSDomainFlag,
	utils.VerbosityFlag,
@@ -644,7 +643,7 @@ func version(c *cli.Context) {
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
-	fmt.Println("Protocol Version:", c.GlobalInt(utils.ProtocolVersionFlag.Name))
+	fmt.Println("Protocol Versions:", eth.ProtocolVersions)
	fmt.Println("Network Id:", c.GlobalInt(utils.NetworkIdFlag.Name))
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("OS:", runtime.GOOS)

@@ -82,11 +82,6 @@ var (
		Usage: "Data directory to be used",
		Value: DirectoryString{common.DefaultDataDir()},
	}
-	ProtocolVersionFlag = cli.IntFlag{
-		Name:  "protocolversion",
-		Usage: "ETH protocol version (integer)",
-		Value: eth.ProtocolVersion,
-	}
	NetworkIdFlag = cli.IntFlag{
		Name:  "networkid",
		Usage: "Network Id (integer)",
@@ -359,7 +354,6 @@ func MakeEthConfig(clientID, version string, ctx *cli.Context) *eth.Config {
	return &eth.Config{
		Name:    common.MakeName(clientID, version),
		DataDir: ctx.GlobalString(DataDirFlag.Name),
-		ProtocolVersion:    ctx.GlobalInt(ProtocolVersionFlag.Name),
		GenesisNonce:       ctx.GlobalInt(GenesisNonceFlag.Name),
		BlockChainVersion:  ctx.GlobalInt(BlockchainVersionFlag.Name),
		SkipBcVersionCheck: false,

@@ -11,8 +11,6 @@ import (
	"strings"
	"time"
-	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/ethash"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common"
@@ -26,6 +24,7 @@ import (
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
+	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/miner"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
@@ -57,10 +56,9 @@ var (
)

type Config struct {
	Name string
-	ProtocolVersion int
	NetworkId    int
	GenesisNonce int
	BlockChainVersion  int
	SkipBcVersionCheck bool // e.g. blockchain export
@@ -226,7 +224,6 @@ type Ethereum struct {
	autodagquit   chan bool
	etherbase     common.Address
	clientVersion string
-	ethVersionId  int
	netVersionId  int
	shhVersionId  int
}
@@ -291,14 +288,20 @@ func New(config *Config) (*Ethereum, error) {
	nodeDb := filepath.Join(config.DataDir, "nodes")

	// Perform database sanity checks
-	d, _ := blockDb.Get([]byte("ProtocolVersion"))
-	protov := int(common.NewValue(d).Uint())
-	if protov != config.ProtocolVersion && protov != 0 {
-		path := filepath.Join(config.DataDir, "blockchain")
-		return nil, fmt.Errorf("Database version mismatch. Protocol(%d / %d). `rm -rf %s`", protov, config.ProtocolVersion, path)
-	}
-	saveProtocolVersion(blockDb, config.ProtocolVersion)
-	glog.V(logger.Info).Infof("Protocol Version: %v, Network Id: %v", config.ProtocolVersion, config.NetworkId)
+	/*
+		// The databases were previously tied to protocol versions. Currently we
+		// are moving away from this decision as approaching Frontier. The below
+		// check was left in for now but should eventually be just dropped.
+
+		d, _ := blockDb.Get([]byte("ProtocolVersion"))
+		protov := int(common.NewValue(d).Uint())
+		if protov != config.ProtocolVersion && protov != 0 {
+			path := filepath.Join(config.DataDir, "blockchain")
+			return nil, fmt.Errorf("Database version mismatch. Protocol(%d / %d). `rm -rf %s`", protov, config.ProtocolVersion, path)
+		}
+		saveProtocolVersion(blockDb, config.ProtocolVersion)
+	*/
+	glog.V(logger.Info).Infof("Protocol Versions: %v, Network Id: %v", ProtocolVersions, config.NetworkId)

	if !config.SkipBcVersionCheck {
		b, _ := blockDb.Get([]byte("BlockchainVersion"))
@@ -321,7 +324,6 @@ func New(config *Config) (*Ethereum, error) {
		DataDir:       config.DataDir,
		etherbase:     common.HexToAddress(config.Etherbase),
		clientVersion: config.Name, // TODO should separate from Name
-		ethVersionId:  config.ProtocolVersion,
		netVersionId:  config.NetworkId,
		NatSpec:       config.NatSpec,
		MinerThreads:  config.MinerThreads,
@@ -345,7 +347,7 @@ func New(config *Config) (*Ethereum, error) {
	eth.blockProcessor = core.NewBlockProcessor(stateDb, extraDb, eth.pow, eth.chainManager, eth.EventMux())
	eth.chainManager.SetProcessor(eth.blockProcessor)
-	eth.protocolManager = NewProtocolManager(config.ProtocolVersion, config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager)
+	eth.protocolManager = NewProtocolManager(config.NetworkId, eth.eventMux, eth.txPool, eth.pow, eth.chainManager)

	eth.miner = miner.New(eth, eth.EventMux(), eth.pow)
	eth.miner.SetGasPrice(config.GasPrice)
@@ -358,7 +360,7 @@ func New(config *Config) (*Ethereum, error) {
	if err != nil {
		return nil, err
	}
-	protocols := []p2p.Protocol{eth.protocolManager.SubProtocol}
+	protocols := append([]p2p.Protocol{}, eth.protocolManager.SubProtocols...)
	if config.Shh {
		protocols = append(protocols, eth.whisper.Protocol())
	}
@@ -495,7 +497,7 @@ func (s *Ethereum) PeerCount() int { return s.net.PeerCoun
func (s *Ethereum) Peers() []*p2p.Peer    { return s.net.Peers() }
func (s *Ethereum) MaxPeers() int         { return s.net.MaxPeers }
func (s *Ethereum) ClientVersion() string { return s.clientVersion }
-func (s *Ethereum) EthVersion() int       { return s.ethVersionId }
+func (s *Ethereum) EthVersion() int       { return int(s.protocolManager.SubProtocols[0].Version) }
func (s *Ethereum) NetVersion() int       { return s.netVersionId }
func (s *Ethereum) ShhVersion() int       { return s.shhVersionId }
func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }
@@ -504,7 +506,7 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolMana
func (s *Ethereum) Start() error {
	jsonlogger.LogJson(&logger.LogStarting{
		ClientString:    s.net.Name,
-		ProtocolVersion: ProtocolVersion,
+		ProtocolVersion: s.EthVersion(),
	})
	err := s.net.Start()
	if err != nil {
@@ -560,7 +562,7 @@ done:
func (s *Ethereum) StartForTest() {
	jsonlogger.LogJson(&logger.LogStarting{
		ClientString:    s.net.Name,
-		ProtocolVersion: ProtocolVersion,
+		ProtocolVersion: s.EthVersion(),
	})
}
@@ -667,14 +669,20 @@ func (self *Ethereum) StopAutoDAG() {
	glog.V(logger.Info).Infof("Automatic pregeneration of ethash DAG OFF (ethash dir: %s)", ethash.DefaultDir)
}

-func saveProtocolVersion(db common.Database, protov int) {
-	d, _ := db.Get([]byte("ProtocolVersion"))
-	protocolVersion := common.NewValue(d).Uint()
-
-	if protocolVersion == 0 {
-		db.Put([]byte("ProtocolVersion"), common.NewValue(protov).Bytes())
-	}
-}
+/*
+// The databases were previously tied to protocol versions. Currently we
+// are moving away from this decision as approaching Frontier. The below
+// code was left in for now but should eventually be just dropped.
+
+func saveProtocolVersion(db common.Database, protov int) {
+	d, _ := db.Get([]byte("ProtocolVersion"))
+	protocolVersion := common.NewValue(d).Uint()
+
+	if protocolVersion == 0 {
+		db.Put([]byte("ProtocolVersion"), common.NewValue(protov).Bytes())
+	}
+}
+*/

func saveBlockchainVersion(db common.Database, bcVersion int) {
	d, _ := db.Get([]byte("BlockchainVersion"))

@@ -19,18 +19,24 @@ import (
	"gopkg.in/fatih/set.v0"
)

+const (
+	eth60 = 60 // Constant to check for old protocol support
+	eth61 = 61 // Constant to check for new protocol support
+)
+
var (
	MinHashFetch  = 512 // Minimum amount of hashes to not consider a peer stalling
-	MaxHashFetch  = 2048 // Amount of hashes to be fetched per retrieval request
+	MaxHashFetch  = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request

	hashTTL         = 5 * time.Second  // Time it takes for a hash request to time out
	blockSoftTTL    = 3 * time.Second  // Request completion threshold for increasing or decreasing a peer's bandwidth
	blockHardTTL    = 3 * blockSoftTTL // Maximum time allowance before a block request is considered expired
	crossCheckCycle = time.Second      // Period after which to check for expired cross checks

-	maxBannedHashes = 4096 // Number of bannable hashes before phasing old ones out
-	maxBlockProcess = 256  // Number of blocks to import at once into the chain
+	maxQueuedHashes = 256 * 1024 // Maximum number of hashes to queue for import (DOS protection)
+	maxBannedHashes = 4096       // Number of bannable hashes before phasing old ones out
+	maxBlockProcess = 256        // Number of blocks to import at once into the chain
)

var (
@@ -58,6 +64,9 @@ type hashCheckFn func(common.Hash) bool
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

+// headRetrievalFn is a callback type for retrieving the head block from the local chain.
+type headRetrievalFn func() *types.Block
+
// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)
@@ -98,6 +107,7 @@ type Downloader struct {
	// Callbacks
	hasBlock    hashCheckFn      // Checks if a block is present in the chain
	getBlock    blockRetrievalFn // Retrieves a block from the chain
+	headBlock   headRetrievalFn  // Retrieves the head block from the chain
	insertChain chainInsertFn    // Injects a batch of blocks into the chain
	dropPeer    peerDropFn       // Drops a peer for misbehaving
@@ -109,8 +119,9 @@ type Downloader struct {
	// Channels
	newPeerCh chan *peer
-	hashCh    chan hashPack
-	blockCh   chan blockPack
+	hashCh    chan hashPack  // Channel receiving inbound hashes
+	blockCh   chan blockPack // Channel receiving inbound blocks
+	processCh chan bool      // Channel to signal the block fetcher of new or finished work

	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex  // Lock to protect the cancel channel in delivers
@@ -123,7 +134,7 @@ type Block struct {
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
-func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
+func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, headBlock headRetrievalFn, insertChain chainInsertFn, dropPeer peerDropFn) *Downloader {
	// Create the base downloader
	downloader := &Downloader{
		mux: mux,
@@ -131,11 +142,13 @@ func New(mux *event.TypeMux, hasBlock hashCheckFn, getBlock blockRetrievalFn, in
		peers:       newPeerSet(),
		hasBlock:    hasBlock,
		getBlock:    getBlock,
+		headBlock:   headBlock,
		insertChain: insertChain,
		dropPeer:    dropPeer,
		newPeerCh:   make(chan *peer, 1),
		hashCh:      make(chan hashPack, 1),
		blockCh:     make(chan blockPack, 1),
+		processCh:   make(chan bool, 1),
	}
	// Inject all the known bad hashes
	downloader.banned = set.New()
@@ -175,7 +188,7 @@ func (d *Downloader) Synchronising() bool {
// RegisterPeer injects a new download peer into the set of block source to be
// used for fetching hashes and blocks from.
-func (d *Downloader) RegisterPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) error {
+func (d *Downloader) RegisterPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) error {
	// If the peer wants to send a banned hash, reject
	if d.banned.Has(head) {
		glog.V(logger.Debug).Infoln("Register rejected, head hash banned:", id)
@@ -183,7 +196,7 @@ func (d *Downloader) RegisterPeer(id string, head common.Hash, getHashes hashFet
	}
	// Otherwise try to construct and register the peer
	glog.V(logger.Detail).Infoln("Registering peer", id)
-	if err := d.peers.Register(newPeer(id, head, getHashes, getBlocks)); err != nil {
+	if err := d.peers.Register(newPeer(id, version, head, getRelHashes, getAbsHashes, getBlocks)); err != nil {
		glog.V(logger.Error).Infoln("Register failed:", err)
		return err
	}
@@ -289,12 +302,38 @@ func (d *Downloader) syncWithPeer(p *peer, hash common.Hash) (err error) {
		}
	}()

-	glog.V(logger.Debug).Infoln("Synchronizing with the network using:", p.id)
-	if err = d.fetchHashes(p, hash); err != nil {
-		return err
-	}
-	if err = d.fetchBlocks(); err != nil {
-		return err
-	}
+	glog.V(logger.Debug).Infof("Synchronizing with the network using: %s, eth/%d", p.id, p.version)
+	switch p.version {
+	case eth60:
+		// Old eth/60 version, use reverse hash retrieval algorithm
+		if err = d.fetchHashes60(p, hash); err != nil {
+			return err
+		}
+		if err = d.fetchBlocks60(); err != nil {
+			return err
+		}
+
+	case eth61:
+		// New eth/61, use forward, concurrent hash and block retrieval algorithm
+		number, err := d.findAncestor(p)
+		if err != nil {
+			return err
+		}
+		errc := make(chan error, 2)
+		go func() { errc <- d.fetchHashes(p, number+1) }()
+		go func() { errc <- d.fetchBlocks(number + 1) }()
+
+		// If any fetcher fails, cancel the other
+		if err := <-errc; err != nil {
+			d.cancel()
+			<-errc
+			return err
+		}
+		return <-errc
+
+	default:
+		// Something very wrong, stop right here
+		glog.V(logger.Error).Infof("Unsupported eth protocol: %d", p.version)
+		return errBadPeer
+	}
	glog.V(logger.Debug).Infoln("Synchronization completed")
@@ -326,10 +365,10 @@ func (d *Downloader) Terminate() {
	d.cancel()
}

-// fetchHahes starts retrieving hashes backwards from a specific peer and hash,
+// fetchHashes60 starts retrieving hashes backwards from a specific peer and hash,
// up until it finds a common ancestor. If the source peer times out, alternative
// ones are tried for continuation.
-func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
+func (d *Downloader) fetchHashes60(p *peer, h common.Hash) error {
	var (
		start  = time.Now()
		active = p // active peer will help determine the current active peer
@@ -346,12 +385,12 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
	<-timeout.C // timeout channel should be initially empty.

	getHashes := func(from common.Hash) {
-		go active.getHashes(from)
+		go active.getRelHashes(from)
		timeout.Reset(hashTTL)
	}
	// Add the hash to the queue, and start hash retrieval.
-	d.queue.Insert([]common.Hash{h})
+	d.queue.Insert([]common.Hash{h}, false)
	getHashes(h)

	attempted[p.id] = true
@@ -377,7 +416,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
			if d.banned.Has(hash) {
				glog.V(logger.Debug).Infof("Peer (%s) sent a known invalid chain", active.id)

-				d.queue.Insert(hashPack.hashes[:index+1])
+				d.queue.Insert(hashPack.hashes[:index+1], false)
				if err := d.banBlocks(active.id, hash); err != nil {
					glog.V(logger.Debug).Infof("Failed to ban batch of blocks: %v", err)
				}
@@ -395,7 +434,7 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
				}
			}
			// Insert all the new hashes, but only continue if got something useful
-			inserts := d.queue.Insert(hashPack.hashes)
+			inserts := d.queue.Insert(hashPack.hashes, false)
			if len(inserts) == 0 && !done {
				glog.V(logger.Debug).Infof("Peer (%s) responded with stale hashes", active.id)
				return errBadPeer
@@ -422,9 +461,9 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
				continue
			}
			// We're done, prepare the download cache and proceed pulling the blocks
-			offset := 0
+			offset := uint64(0)
			if block := d.getBlock(head); block != nil {
-				offset = int(block.NumberU64() + 1)
+				offset = block.NumberU64() + 1
			}
			d.queue.Prepare(offset)
			finished = true
@@ -481,10 +520,10 @@ func (d *Downloader) fetchHashes(p *peer, h common.Hash) error {
	return nil
}

-// fetchBlocks iteratively downloads the entire schedules block-chain, taking
+// fetchBlocks60 iteratively downloads the entire schedules block-chain, taking
// any available peers, reserving a chunk of blocks for each, wait for delivery
// and periodically checking for timeouts.
-func (d *Downloader) fetchBlocks() error {
+func (d *Downloader) fetchBlocks60() error {
	glog.V(logger.Debug).Infoln("Downloading", d.queue.Pending(), "block(s)")
	start := time.Now()
@@ -619,6 +658,332 @@ out:
	return nil
}
// findAncestor tries to locate the common ancestor block of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N blocks should already get us a match.
// In the rare scenario when we ended up on a long soft fork (i.e. none of the
// head blocks match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peer) (uint64, error) {
glog.V(logger.Debug).Infof("%v: looking for common ancestor", p)
// Request our head blocks to short circuit ancestor location
head := d.headBlock().NumberU64()
from := int64(head) - int64(MaxHashFetch)
if from < 0 {
from = 0
}
go p.getAbsHashes(uint64(from), MaxHashFetch)
// Wait for the remote response to the head fetch
number, hash := uint64(0), common.Hash{}
timeout := time.After(hashTTL)
for finished := false; !finished; {
select {
case <-d.cancelCh:
return 0, errCancelHashFetch
case hashPack := <-d.hashCh:
// Discard anything not from the origin peer
if hashPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
// Make sure the peer actually gave something valid
hashes := hashPack.hashes
if len(hashes) == 0 {
glog.V(logger.Debug).Infof("%v: empty head hash set", p)
return 0, errEmptyHashSet
}
// Check if a common ancestor was found
finished = true
for i := len(hashes) - 1; i >= 0; i-- {
if d.hasBlock(hashes[i]) {
number, hash = uint64(from)+uint64(i), hashes[i]
break
}
}
case <-d.blockCh:
// Out of bounds blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: head hash timeout", p)
return 0, errTimeout
}
}
// If the head fetch already found an ancestor, return
if !common.EmptyHash(hash) {
glog.V(logger.Debug).Infof("%v: common ancestor: #%d [%x]", p, number, hash[:4])
return number, nil
}
// Ancestor not found, we need to binary search over our chain
start, end := uint64(0), head
for start+1 < end {
// Split our chain interval in two, and request the hash to cross check
check := (start + end) / 2
timeout := time.After(hashTTL)
go p.getAbsHashes(uint64(check), 1)
// Wait until a reply arrives to this request
for arrived := false; !arrived; {
select {
case <-d.cancelCh:
return 0, errCancelHashFetch
case hashPack := <-d.hashCh:
// Discard anything not from the origin peer
if hashPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
// Make sure the peer actually gave something valid
hashes := hashPack.hashes
if len(hashes) != 1 {
glog.V(logger.Debug).Infof("%v: invalid search hash set (%d)", p, len(hashes))
return 0, errBadPeer
}
arrived = true
// Modify the search interval based on the response
block := d.getBlock(hashes[0])
if block == nil {
end = check
break
}
if block.NumberU64() != check {
glog.V(logger.Debug).Infof("%v: non requested hash #%d [%x], instead of #%d", p, block.NumberU64(), block.Hash().Bytes()[:4], check)
return 0, errBadPeer
}
start = check
case <-d.blockCh:
// Out of bounds blocks received, ignore them
case <-timeout:
glog.V(logger.Debug).Infof("%v: search hash timeout", p)
return 0, errTimeout
}
}
}
return start, nil
}
// fetchHashes keeps retrieving hashes from the requested number, until no more
// are returned, potentially throttling on the way.
func (d *Downloader) fetchHashes(p *peer, from uint64) error {
glog.V(logger.Debug).Infof("%v: downloading hashes from #%d", p, from)
// Create a timeout timer, and the associated hash fetcher
timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
<-timeout.C // timeout channel should be initially empty
defer timeout.Stop()
getHashes := func(from uint64) {
glog.V(logger.Detail).Infof("%v: fetching %d hashes from #%d", p, MaxHashFetch, from)
go p.getAbsHashes(from, MaxHashFetch)
timeout.Reset(hashTTL)
}
// Start pulling hashes, until all are exhausted
getHashes(from)
for {
select {
case <-d.cancelCh:
return errCancelHashFetch
case hashPack := <-d.hashCh:
// Make sure the active peer is giving us the hashes
if hashPack.peerId != p.id {
glog.V(logger.Debug).Infof("Received hashes from incorrect peer(%s)", hashPack.peerId)
break
}
timeout.Stop()
// If no more hashes are inbound, notify the block fetcher and return
if len(hashPack.hashes) == 0 {
glog.V(logger.Debug).Infof("%v: no available hashes", p)
select {
case d.processCh <- false:
case <-d.cancelCh:
}
return nil
}
// Otherwise insert all the new hashes, aborting in case of junk
glog.V(logger.Detail).Infof("%v: inserting %d hashes from #%d", p, len(hashPack.hashes), from)
inserts := d.queue.Insert(hashPack.hashes, true)
if len(inserts) != len(hashPack.hashes) {
glog.V(logger.Debug).Infof("%v: stale hashes", p)
return errBadPeer
}
// Notify the block fetcher of new hashes, but stop if queue is full
cont := d.queue.Pending() < maxQueuedHashes
select {
case d.processCh <- cont:
default:
}
if !cont {
return nil
}
// Queue not yet full, fetch the next batch
from += uint64(len(hashPack.hashes))
getHashes(from)
case <-timeout.C:
glog.V(logger.Debug).Infof("%v: hash request timed out", p)
return errTimeout
}
}
}
// fetchBlocks iteratively downloads the scheduled hashes, taking any available
// peers, reserving a chunk of blocks for each, waiting for delivery and also
// periodically checking for timeouts.
func (d *Downloader) fetchBlocks(from uint64) error {
glog.V(logger.Debug).Infof("Downloading blocks from #%d", from)
defer glog.V(logger.Debug).Infof("Block download terminated")
// Create a timeout timer for scheduling expiration tasks
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
update := make(chan struct{}, 1)
// Prepare the queue and fetch blocks until the hash fetcher's done
d.queue.Prepare(from)
finished := false
for {
select {
case <-d.cancelCh:
return errCancelBlockFetch
case blockPack := <-d.blockCh:
// If the peer was previously banned and failed to deliver its pack
// in a reasonable time frame, ignore its message.
if peer := d.peers.Peer(blockPack.peerId); peer != nil {
// Deliver the received chunk of blocks, and demote in case of errors
err := d.queue.Deliver(blockPack.peerId, blockPack.blocks)
switch err {
case nil:
// If no blocks were delivered, demote the peer (need the delivery above)
if len(blockPack.blocks) == 0 {
peer.Demote()
peer.SetIdle()
glog.V(logger.Detail).Infof("%s: no blocks delivered", peer)
break
}
// All was successful, promote the peer and potentially start processing
peer.Promote()
peer.SetIdle()
glog.V(logger.Detail).Infof("%s: delivered %d blocks", peer, len(blockPack.blocks))
go d.process()
case errInvalidChain:
// The hash chain is invalid (blocks are not ordered properly), abort
return err
case errNoFetchesPending:
// Peer probably timed out with its delivery but came through
// in the end, demote, but allow it to pull from this peer.
peer.Demote()
peer.SetIdle()
glog.V(logger.Detail).Infof("%s: out of bound delivery", peer)
case errStaleDelivery:
// Delivered something completely else than requested, usually
// caused by a timeout and delivery during a new sync cycle.
// Don't set it to idle as the original request should still be
// in flight.
peer.Demote()
glog.V(logger.Detail).Infof("%s: stale delivery", peer)
default:
// Peer did something semi-useful, demote but keep it around
peer.Demote()
peer.SetIdle()
glog.V(logger.Detail).Infof("%s: delivery partially failed: %v", peer, err)
go d.process()
}
}
// Blocks arrived, try to update the progress
select {
case update <- struct{}{}:
default:
}
case cont := <-d.processCh:
// The hash fetcher sent a continuation flag, check if it's done
if !cont {
finished = true
}
// Hashes arrive, try to update the progress
select {
case update <- struct{}{}:
default:
}
case <-ticker.C:
// Sanity check update the progress
select {
case update <- struct{}{}:
default:
}
case <-update:
// Short circuit if we lost all our peers
if d.peers.Len() == 0 {
return errNoPeers
}
// Check for block request timeouts and demote the responsible peers
for _, pid := range d.queue.Expire(blockHardTTL) {
if peer := d.peers.Peer(pid); peer != nil {
peer.Demote()
glog.V(logger.Detail).Infof("%s: block delivery timeout", peer)
}
}
// If there's nothing more to fetch, wait or terminate
if d.queue.Pending() == 0 {
if d.queue.InFlight() == 0 && finished {
glog.V(logger.Debug).Infof("Block fetching completed")
return nil
}
break
}
// Send a download request to all idle peers, until throttled
for _, peer := range d.peers.IdlePeers() {
// Short circuit if throttling activated
if d.queue.Throttle() {
break
}
// Reserve a chunk of hashes for a peer. A nil can mean either that
// no more hashes are available, or that the peer is known not to
// have them.
request := d.queue.Reserve(peer, peer.Capacity())
if request == nil {
continue
}
if glog.V(logger.Detail) {
glog.Infof("%s: requesting %d blocks", peer, len(request.Hashes))
}
// Fetch the chunk and make sure any errors return the hashes to the queue
if err := peer.Fetch(request); err != nil {
glog.V(logger.Error).Infof("%v: fetch failed, rescheduling", peer)
d.queue.Cancel(request)
}
}
// Make sure that we have peers available for fetching. If all peers have been tried
// and all failed throw an error
if !d.queue.Throttle() && d.queue.InFlight() == 0 {
return errPeersUnavailable
}
}
}
}
// banBlocks retrieves a batch of blocks from a peer feeding us invalid hashes,
// and bans the head of the retrieved batch.
//
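For reference, the interval-narrowing loop that the findAncestor routine above runs against a peer can be exercised in isolation. The sketch below is illustrative only: hasBlock stands in for "the peer's hash at this height is already in our chain", and the concrete numbers are invented for the example.

package main

import "fmt"

func main() {
	const localHead = 1000 // our chain height (assumption for the example)
	const ancestor = 700   // blocks 0..700 are assumed shared with the remote peer

	// Stand-in for querying the peer's hash at a height and checking it locally.
	hasBlock := func(n uint64) bool { return n <= ancestor }

	start, end := uint64(0), uint64(localHead)
	for start+1 < end {
		check := (start + end) / 2
		if hasBlock(check) {
			start = check // common part extends at least this far
		} else {
			end = check // chains have diverged at or before this number
		}
	}
	fmt.Println("common ancestor:", start) // prints 700
}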

@@ -21,7 +21,7 @@ var (
	genesis = core.GenesisBlockForTesting(testdb, common.Address{}, big.NewInt(0))
)

-// makeChain creates a chain of n blocks starting at and including
+// makeChain creates a chain of n blocks starting at but not including
// parent. the returned hash chain is ordered head->parent.
func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {
	blocks := core.GenerateChain(parent, testdb, n, func(i int, gen *core.BlockGen) {
@@ -42,7 +42,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block) (h1, h2 []common.Hash, b1, b2 map[common.Hash]*types.Block) {
	// Create the common suffix.
-	h, b := makeChain(n-f-1, 0, parent)
+	h, b := makeChain(n-f, 0, parent)
	// Create the forks.
	h1, b1 = makeChain(f, 1, b[h[0]])
	h1 = append(h1, h[1:]...)
@@ -75,7 +75,7 @@ func newTester() *downloadTester {
		peerHashes: make(map[string][]common.Hash),
		peerBlocks: make(map[string]map[common.Hash]*types.Block),
	}
-	tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.insertChain, tester.dropPeer)
+	tester.downloader = New(new(event.TypeMux), tester.hasBlock, tester.getBlock, tester.headBlock, tester.insertChain, tester.dropPeer)

	return tester
}
@@ -99,6 +99,11 @@ func (dl *downloadTester) getBlock(hash common.Hash) *types.Block {
	return dl.ownBlocks[hash]
}

+// headBlock retrieves the current head block from the canonical chain.
+func (dl *downloadTester) headBlock() *types.Block {
+	return dl.getBlock(dl.ownHashes[len(dl.ownHashes)-1])
+}
+
// insertChain injects a new batch of blocks into the simulated chain.
func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
	for i, block := range blocks {
@@ -112,15 +117,15 @@ func (dl *downloadTester) insertChain(blocks types.Blocks) (int, error) {
}

// newPeer registers a new block download source into the downloader.
-func (dl *downloadTester) newPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block) error {
-	return dl.newSlowPeer(id, hashes, blocks, 0)
+func (dl *downloadTester) newPeer(id string, version int, hashes []common.Hash, blocks map[common.Hash]*types.Block) error {
+	return dl.newSlowPeer(id, version, hashes, blocks, 0)
}

// newSlowPeer registers a new block download source into the downloader, with a
// specific delay time on processing the network packets sent to it, simulating
// potentially slow network IO.
-func (dl *downloadTester) newSlowPeer(id string, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error {
-	err := dl.downloader.RegisterPeer(id, hashes[0], dl.peerGetHashesFn(id, delay), dl.peerGetBlocksFn(id, delay))
+func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Hash, blocks map[common.Hash]*types.Block, delay time.Duration) error {
+	err := dl.downloader.RegisterPeer(id, version, hashes[0], dl.peerGetRelHashesFn(id, delay), dl.peerGetAbsHashesFn(id, version, delay), dl.peerGetBlocksFn(id, delay))
	if err == nil {
		// Assign the owned hashes and blocks to the peer (deep copy)
		dl.peerHashes[id] = make([]common.Hash, len(hashes))
@@ -141,10 +146,10 @@ func (dl *downloadTester) dropPeer(id string) {
	dl.downloader.UnregisterPeer(id)
}

-// peerGetBlocksFn constructs a getHashes function associated with a particular
+// peerGetRelHashesFn constructs a GetHashes function associated with a specific
// peer in the download tester. The returned function can be used to retrieve
// batches of hashes from the particularly requested peer.
-func (dl *downloadTester) peerGetHashesFn(id string, delay time.Duration) func(head common.Hash) error {
+func (dl *downloadTester) peerGetRelHashesFn(id string, delay time.Duration) func(head common.Hash) error {
	return func(head common.Hash) error {
		time.Sleep(delay)
@@ -174,13 +179,43 @@ func (dl *downloadTester) peerGetHashesFn(id string, delay time.Duration) func(h
	}
}
// peerGetAbsHashesFn constructs a GetHashesFromNumber function associated with
// a particular peer in the download tester. The returned function can be used to
// retrieve batches of hashes from the particularly requested peer.
func (dl *downloadTester) peerGetAbsHashesFn(id string, version int, delay time.Duration) func(uint64, int) error {
// If the simulated peer runs eth/60, this message is not supported
if version == eth60 {
return func(uint64, int) error { return nil }
}
// Otherwise create a method to request the blocks by number
return func(head uint64, count int) error {
time.Sleep(delay)
limit := count
if dl.maxHashFetch > 0 {
limit = dl.maxHashFetch
}
// Gather the next batch of hashes
hashes := dl.peerHashes[id]
result := make([]common.Hash, 0, limit)
for i := 0; i < limit && len(hashes)-int(head)-1-i >= 0; i++ {
result = append(result, hashes[len(hashes)-int(head)-1-i])
}
// Delay delivery a bit to allow attacks to unfold
go func() {
time.Sleep(time.Millisecond)
dl.downloader.DeliverHashes(id, result)
}()
return nil
}
}
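A quick note on the index arithmetic in peerGetAbsHashesFn above: the tester stores each peer's hashes head → genesis (as makeChain's comment states), so block number n lives at index len(hashes)-1-n. A tiny illustrative sketch of the same mapping, using made-up hash labels:

package main

import "fmt"

func main() {
	// Assume a tiny chain of 5 hashes ordered head -> genesis.
	hashes := []string{"h4", "h3", "h2", "h1", "h0 (genesis)"}

	head, count := uint64(1), 3 // request 3 hashes starting at block #1
	result := make([]string, 0, count)
	for i := 0; i < count && len(hashes)-int(head)-1-i >= 0; i++ {
		result = append(result, hashes[len(hashes)-int(head)-1-i])
	}
	fmt.Println(result) // [h1 h2 h3] — blocks #1, #2, #3 in ascending order
}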
// peerGetBlocksFn constructs a getBlocks function associated with a particular
// peer in the download tester. The returned function can be used to retrieve
// batches of blocks from the particularly requested peer.
func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([]common.Hash) error {
	return func(hashes []common.Hash) error {
		time.Sleep(delay)
		blocks := dl.peerBlocks[id]
		result := make([]*types.Block, 0, len(hashes))
		for _, hash := range hashes {
@@ -195,13 +230,13 @@ func (dl *downloadTester) peerGetBlocksFn(id string, delay time.Duration) func([
}

// Tests that simple synchronization, without throttling from a good peer works.
-func TestSynchronisation(t *testing.T) {
+func TestSynchronisation60(t *testing.T) {
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
-	tester.newPeer("peer", hashes, blocks)
+	tester.newPeer("peer", eth60, hashes, blocks)

	// Synchronise with the peer and make sure all blocks were retrieved
	if err := tester.sync("peer"); err != nil {
@@ -212,42 +247,79 @@ func TestSynchronisation(t *testing.T) {
	}
}

-// Tests that an inactive downloader will not accept incoming hashes and blocks.
-func TestInactiveDownloader(t *testing.T) {
-	tester := newTester()
-
-	// Check that neither hashes nor blocks are accepted
-	if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-	if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
-		t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
-	}
-}
-
-// Tests that a canceled download wipes all previously accumulated state.
-func TestCancel(t *testing.T) {
-	// Create a small enough block chain to download and the tester
-	targetBlocks := blockCacheLimit - 15
-	hashes, blocks := makeChain(targetBlocks, 0, genesis)
-
-	tester := newTester()
-	tester.newPeer("peer", hashes, blocks)
-
-	// Make sure canceling works with a pristine downloader
-	tester.downloader.cancel()
-	hashCount, blockCount := tester.downloader.queue.Size()
-	if hashCount > 0 || blockCount > 0 {
-		t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
-	}
-	// Synchronise with the peer, but cancel afterwards
-	if err := tester.sync("peer"); err != nil {
-		t.Fatalf("failed to synchronise blocks: %v", err)
-	}
-	tester.downloader.cancel()
-	hashCount, blockCount = tester.downloader.queue.Size()
-	if hashCount > 0 || blockCount > 0 {
-		t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
-	}
-}
+// Tests that simple synchronization against a canonical chain works correctly.
+// In this test common ancestor lookup should be short circuited and not require
+// binary searching.
+func TestCanonicalSynchronisation(t *testing.T) {
+	// Create a small enough block chain to download
+	targetBlocks := blockCacheLimit - 15
+	hashes, blocks := makeChain(targetBlocks, 0, genesis)
+
+	tester := newTester()
+	tester.newPeer("peer", eth61, hashes, blocks)
+
+	// Synchronise with the peer and make sure all blocks were retrieved
+	if err := tester.sync("peer"); err != nil {
+		t.Fatalf("failed to synchronise blocks: %v", err)
+	}
+	if imported := len(tester.ownBlocks); imported != targetBlocks+1 {
+		t.Fatalf("synchronised block mismatch: have %v, want %v", imported, targetBlocks+1)
+	}
+}
+
+// Tests that if a large batch of blocks are being downloaded, it is throttled
+// until the cached blocks are retrieved.
+func TestThrottling60(t *testing.T) {
+	// Create a long block chain to download and the tester
+	targetBlocks := 8 * blockCacheLimit
+	hashes, blocks := makeChain(targetBlocks, 0, genesis)
+
+	tester := newTester()
+	tester.newPeer("peer", eth60, hashes, blocks)
+
+	// Wrap the importer to allow stepping
+	done := make(chan int)
+	tester.downloader.insertChain = func(blocks types.Blocks) (int, error) {
+		n, err := tester.insertChain(blocks)
+		done <- n
+		return n, err
+	}
+	// Start a synchronisation concurrently
+	errc := make(chan error)
+	go func() {
+		errc <- tester.sync("peer")
+	}()
+	// Iteratively take some blocks, always checking the retrieval count
+	for len(tester.ownBlocks) < targetBlocks+1 {
+		// Wait a bit for sync to throttle itself
+		var cached int
+		for start := time.Now(); time.Since(start) < 3*time.Second; {
+			time.Sleep(25 * time.Millisecond)
+
+			cached = len(tester.downloader.queue.blockPool)
+			if cached == blockCacheLimit || len(tester.ownBlocks)+cached == targetBlocks+1 {
+				break
+			}
+		}
+		// Make sure we filled up the cache, then exhaust it
+		time.Sleep(25 * time.Millisecond) // give it a chance to screw up
+		if cached != blockCacheLimit && len(tester.ownBlocks)+cached < targetBlocks+1 {
+			t.Fatalf("block count mismatch: have %v, want %v", cached, blockCacheLimit)
+		}
+		<-done // finish previous blocking import
+		for cached > maxBlockProcess {
+			cached -= <-done
+		}
+		time.Sleep(25 * time.Millisecond) // yield to the insertion
+	}
+	<-done // finish the last blocking import
+
+	// Check that we haven't pulled more blocks than available
+	if len(tester.ownBlocks) > targetBlocks+1 {
+		t.Fatalf("target block count mismatch: have %v, want %v", len(tester.ownBlocks), targetBlocks+1)
+	}
+	if err := <-errc; err != nil {
+		t.Fatalf("block synchronization failed: %v", err)
+	}
+}
@@ -259,7 +331,7 @@ func TestThrottling(t *testing.T) {
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

	tester := newTester()
-	tester.newPeer("peer", hashes, blocks)
+	tester.newPeer("peer", eth61, hashes, blocks)

	// Wrap the importer to allow stepping
	done := make(chan int)
@@ -307,6 +379,102 @@ func TestThrottling(t *testing.T) {
	}
}
// Tests that simple synchronization against a forked chain works correctly. In
// this test common ancestor lookup should *not* be short circuited, and a full
// binary search should be executed.
func TestForkedSynchronisation(t *testing.T) {
// Create a long enough forked chain
common, fork := MaxHashFetch, 2*MaxHashFetch
hashesA, hashesB, blocksA, blocksB := makeChainFork(common+fork, fork, genesis)
tester := newTester()
tester.newPeer("fork A", eth61, hashesA, blocksA)
tester.newPeer("fork B", eth61, hashesB, blocksB)
// Synchronise with the peer and make sure all blocks were retrieved
if err := tester.sync("fork A"); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
if imported := len(tester.ownBlocks); imported != common+fork+1 {
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, common+fork+1)
}
// Synchronise with the second peer and make sure that fork is pulled too
if err := tester.sync("fork B"); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
if imported := len(tester.ownBlocks); imported != common+2*fork+1 {
t.Fatalf("synchronised block mismatch: have %v, want %v", imported, common+2*fork+1)
}
}
// Tests that an inactive downloader will not accept incoming hashes and blocks.
func TestInactiveDownloader(t *testing.T) {
tester := newTester()
// Check that neither hashes nor blocks are accepted
if err := tester.downloader.DeliverHashes("bad peer", []common.Hash{}); err != errNoSyncActive {
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
}
if err := tester.downloader.DeliverBlocks("bad peer", []*types.Block{}); err != errNoSyncActive {
t.Errorf("error mismatch: have %v, want %v", err, errNoSyncActive)
}
}
// Tests that a canceled download wipes all previously accumulated state.
func TestCancel60(t *testing.T) {
// Create a small enough block chain to download and the tester
targetBlocks := blockCacheLimit - 15
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
tester.newPeer("peer", eth60, hashes, blocks)
// Make sure canceling works with a pristine downloader
tester.downloader.cancel()
hashCount, blockCount := tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
}
// Synchronise with the peer, but cancel afterwards
if err := tester.sync("peer"); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
tester.downloader.cancel()
hashCount, blockCount = tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
}
}
// Tests that a canceled download wipes all previously accumulated state.
func TestCancel(t *testing.T) {
// Create a small enough block chain to download and the tester
targetBlocks := blockCacheLimit - 15
if targetBlocks >= MaxHashFetch {
targetBlocks = MaxHashFetch - 15
}
hashes, blocks := makeChain(targetBlocks, 0, genesis)
tester := newTester()
tester.newPeer("peer", eth61, hashes, blocks)
// Make sure canceling works with a pristine downloader
tester.downloader.cancel()
hashCount, blockCount := tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
}
// Synchronise with the peer, but cancel afterwards
if err := tester.sync("peer"); err != nil {
t.Fatalf("failed to synchronise blocks: %v", err)
}
tester.downloader.cancel()
hashCount, blockCount = tester.downloader.queue.Size()
if hashCount > 0 || blockCount > 0 {
t.Errorf("block or hash count mismatch: %d hashes, %d blocks, want 0", hashCount, blockCount)
}
}
// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).
func TestMultiSynchronisation(t *testing.T) {
	// Create various peers with various parts of the chain
@@ -317,7 +485,7 @@ func TestMultiSynchronisation(t *testing.T) {
	tester := newTester()
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
-		tester.newPeer(id, hashes[i*blockCacheLimit:], blocks)
+		tester.newPeer(id, eth60, hashes[i*blockCacheLimit:], blocks)
	}
	// Synchronise with the middle peer and make sure half of the blocks were retrieved
	id := fmt.Sprintf("peer #%d", targetPeers/2)
@@ -347,8 +515,8 @@ func TestSlowSynchronisation(t *testing.T) {
	targetIODelay := time.Second
	hashes, blocks := makeChain(targetBlocks, 0, genesis)

-	tester.newSlowPeer("fast", hashes, blocks, 0)
-	tester.newSlowPeer("slow", hashes, blocks, targetIODelay)
+	tester.newSlowPeer("fast", eth60, hashes, blocks, 0)
+	tester.newSlowPeer("slow", eth60, hashes, blocks, targetIODelay)

	// Try to sync with the peers (pull hashes from fast)
	start := time.Now()
@@ -370,13 +538,14 @@ func TestSlowSynchronisation(t *testing.T) {
func TestNonExistingParentAttack(t *testing.T) {
	tester := newTester()

+	// Forge a single-link chain with a forged header
	hashes, blocks := makeChain(1, 0, genesis)
-	tester.newPeer("valid", hashes, blocks)
+	tester.newPeer("valid", eth60, hashes, blocks)

	wrongblock := types.NewBlock(&types.Header{}, nil, nil, nil)
	wrongblock.Td = blocks[hashes[0]].Td
	hashes, blocks = makeChain(1, 0, wrongblock)
-	tester.newPeer("attack", hashes, blocks)
+	tester.newPeer("attack", eth60, hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err == nil {
@@ -401,8 +570,8 @@ func TestRepeatingHashAttack(t *testing.T) { // TODO: Is this thing valid??
	// Create a valid chain, but drop the last link
	hashes, blocks := makeChain(blockCacheLimit, 0, genesis)
-	tester.newPeer("valid", hashes, blocks)
-	tester.newPeer("attack", hashes[:len(hashes)-1], blocks)
+	tester.newPeer("valid", eth60, hashes, blocks)
+	tester.newPeer("attack", eth60, hashes[:len(hashes)-1], blocks)

	// Try and sync with the malicious node
	errc := make(chan error)
@@ -431,10 +600,10 @@ func TestNonExistingBlockAttack(t *testing.T) {
	// Create a valid chain, but forge the last link
	hashes, blocks := makeChain(blockCacheLimit, 0, genesis)
-	tester.newPeer("valid", hashes, blocks)
+	tester.newPeer("valid", eth60, hashes, blocks)

	hashes[len(hashes)/2] = common.Hash{}
-	tester.newPeer("attack", hashes, blocks)
+	tester.newPeer("attack", eth60, hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errPeersUnavailable {
@@ -453,7 +622,7 @@ func TestInvalidHashOrderAttack(t *testing.T) {
	// Create a valid long chain, but reverse some hashes within
	hashes, blocks := makeChain(4*blockCacheLimit, 0, genesis)
-	tester.newPeer("valid", hashes, blocks)
+	tester.newPeer("valid", eth60, hashes, blocks)

	chunk1 := make([]common.Hash, blockCacheLimit)
	chunk2 := make([]common.Hash, blockCacheLimit)
@@ -462,7 +631,7 @@ func TestInvalidHashOrderAttack(t *testing.T) {
	copy(hashes[2*blockCacheLimit:], chunk1)
	copy(hashes[blockCacheLimit:], chunk2)
-	tester.newPeer("attack", hashes, blocks)
+	tester.newPeer("attack", eth60, hashes, blocks)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errInvalidChain {
@@ -489,8 +658,8 @@ func TestMadeupHashChainAttack(t *testing.T) {
		rand.Read(randomHashes[i][:])
	}

-	tester.newPeer("valid", hashes, blocks)
-	tester.newPeer("attack", randomHashes, nil)
+	tester.newPeer("valid", eth60, hashes, blocks)
+	tester.newPeer("attack", eth60, randomHashes, nil)

	// Try and sync with the malicious node and check that it fails
	if err := tester.sync("attack"); err != errCrossCheckFailed {
@@ -517,7 +686,7 @@ func TestMadeupHashChainDrippingAttack(t *testing.T) {
	// Try and sync with the attacker, one hash at a time
	tester.maxHashFetch = 1
-	tester.newPeer("attack", randomHashes, nil)
+	tester.newPeer("attack", eth60, randomHashes, nil)

	if err := tester.sync("attack"); err != errStallingPeer {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errStallingPeer)
	}
@@ -540,7 +709,7 @@ func TestMadeupBlockChainAttack(t *testing.T) {
	}
	// Try and sync with the malicious node and check that it fails
	tester := newTester()
-	tester.newPeer("attack", gapped, blocks)
+	tester.newPeer("attack", eth60, gapped, blocks)
	if err := tester.sync("attack"); err != errCrossCheckFailed {
		t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errCrossCheckFailed)
	}
@@ -548,13 +717,13 @@ func TestMadeupBlockChainAttack(t *testing.T) {
	blockSoftTTL = defaultBlockTTL
	crossCheckCycle = defaultCrossCheckCycle

-	tester.newPeer("valid", hashes, blocks)
+	tester.newPeer("valid", eth60, hashes, blocks)
	if err := tester.sync("valid"); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}

-// tests that if one/multiple malicious peers try to feed a banned blockchain to
+// Tests that if one/multiple malicious peers try to feed a banned blockchain to
// the downloader, it will not keep refetching the same chain indefinitely, but
// gradually block pieces of it, until its head is also blocked.
func TestBannedChainStarvationAttack(t *testing.T) {
@@ -565,8 +734,8 @@ func TestBannedChainStarvationAttack(t *testing.T) {
	// Create the tester and ban the selected hash.
	tester := newTester()
	tester.downloader.banned.Add(forkHashes[fork-1])

-	tester.newPeer("valid", hashes, blocks)
-	tester.newPeer("attack", forkHashes, forkBlocks)
+	tester.newPeer("valid", eth60, hashes, blocks)
+	tester.newPeer("attack", eth60, forkHashes, forkBlocks)

	// Iteratively try to sync, and verify that the banned hash list grows until
	// the head of the invalid chain is blocked too.
@@ -586,7 +755,7 @@ func TestBannedChainStarvationAttack(t *testing.T) {
		banned = bans
	}
	// Check that after banning an entire chain, bad peers get dropped
-	if err := tester.newPeer("new attacker", forkHashes, forkBlocks); err != errBannedHead {
+	if err := tester.newPeer("new attacker", eth60, forkHashes, forkBlocks); err != errBannedHead {
		t.Fatalf("peer registration mismatch: have %v, want %v", err, errBannedHead)
	}
	if peer := tester.downloader.peers.Peer("new attacker"); peer != nil {
@ -618,8 +787,8 @@ func TestBannedChainMemoryExhaustionAttack(t *testing.T) {
MaxBlockFetch = 4 MaxBlockFetch = 4
maxBannedHashes = 256 maxBannedHashes = 256
tester.newPeer("valid", hashes, blocks) tester.newPeer("valid", eth60, hashes, blocks)
tester.newPeer("attack", forkHashes, forkBlocks) tester.newPeer("attack", eth60, forkHashes, forkBlocks)
// Iteratively try to sync, and verify that the banned hash list grows until // Iteratively try to sync, and verify that the banned hash list grows until
// the head of the invalid chain is blocked too. // the head of the invalid chain is blocked too.
@ -664,7 +833,7 @@ func TestOverlappingDeliveryAttack(t *testing.T) {
// Register an attacker that always returns non-requested blocks too // Register an attacker that always returns non-requested blocks too
tester := newTester() tester := newTester()
tester.newPeer("attack", hashes, blocks) tester.newPeer("attack", eth60, hashes, blocks)
rawGetBlocks := tester.downloader.peers.Peer("attack").getBlocks rawGetBlocks := tester.downloader.peers.Peer("attack").getBlocks
tester.downloader.peers.Peer("attack").getBlocks = func(request []common.Hash) error { tester.downloader.peers.Peer("attack").getBlocks = func(request []common.Hash) error {
@ -712,7 +881,7 @@ func TestHashAttackerDropping(t *testing.T) {
for i, tt := range tests { for i, tt := range tests {
// Register a new peer and ensure its presence // Register a new peer and ensure its presence
id := fmt.Sprintf("test %d", i) id := fmt.Sprintf("test %d", i)
if err := tester.newPeer(id, []common.Hash{genesis.Hash()}, nil); err != nil { if err := tester.newPeer(id, eth60, []common.Hash{genesis.Hash()}, nil); err != nil {
t.Fatalf("test %d: failed to register new peer: %v", i, err) t.Fatalf("test %d: failed to register new peer: %v", i, err)
} }
if _, ok := tester.peerHashes[id]; !ok { if _, ok := tester.peerHashes[id]; !ok {
@ -744,7 +913,7 @@ func TestBlockAttackerDropping(t *testing.T) {
for i, tt := range tests { for i, tt := range tests {
// Register a new peer and ensure its presence // Register a new peer and ensure its presence
id := fmt.Sprintf("test %d", i) id := fmt.Sprintf("test %d", i)
if err := tester.newPeer(id, []common.Hash{common.Hash{}}, nil); err != nil { if err := tester.newPeer(id, eth60, []common.Hash{common.Hash{}}, nil); err != nil {
t.Fatalf("test %d: failed to register new peer: %v", i, err) t.Fatalf("test %d: failed to register new peer: %v", i, err)
} }
if _, ok := tester.peerHashes[id]; !ok { if _, ok := tester.peerHashes[id]; !ok {

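The downloader tests above now tag every simulated peer with the protocol version it should emulate (eth60 in these hunks), and the invalid-hash-order attack builds its malicious chain by swapping two adjacent windows of the honest hash list. A standalone sketch of that reordering, using strings instead of common.Hash and a tiny chunk size; it assumes the elided lines fill chunk1 and chunk2 from the second and third windows of the chain, which the hunk does not show:

package main

import "fmt"

func main() {
	// A toy hash chain and a small chunk size stand in for the real
	// blockCacheLimit-sized windows used by the test above.
	hashes := []string{"h1", "h2", "h3", "h4", "h5", "h6"}
	chunkSize := 2

	// Save the second and third windows of the chain.
	chunk1 := make([]string, chunkSize)
	chunk2 := make([]string, chunkSize)
	copy(chunk1, hashes[chunkSize:2*chunkSize])
	copy(chunk2, hashes[2*chunkSize:3*chunkSize])

	// Swap them back in the other way around, so the peer announces the
	// chain out of order.
	copy(hashes[2*chunkSize:], chunk1)
	copy(hashes[chunkSize:], chunk2)

	fmt.Println(hashes) // [h1 h2 h5 h6 h3 h4]
}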
@ -15,7 +15,8 @@ import (
"gopkg.in/fatih/set.v0" "gopkg.in/fatih/set.v0"
) )
type hashFetcherFn func(common.Hash) error type relativeHashFetcherFn func(common.Hash) error
type absoluteHashFetcherFn func(uint64, int) error
type blockFetcherFn func([]common.Hash) error type blockFetcherFn func([]common.Hash) error
var ( var (
@ -37,20 +38,25 @@ type peer struct {
ignored *set.Set // Set of hashes not to request (didn't have previously) ignored *set.Set // Set of hashes not to request (didn't have previously)
getHashes hashFetcherFn // Method to retrieve a batch of hashes (mockable for testing) getRelHashes relativeHashFetcherFn // Method to retrieve a batch of hashes from an origin hash
getBlocks blockFetcherFn // Method to retrieve a batch of blocks (mockable for testing) getAbsHashes absoluteHashFetcherFn // Method to retrieve a batch of hashes from an absolute position
getBlocks blockFetcherFn // Method to retrieve a batch of blocks
version int // Eth protocol version number to switch strategies
} }
// newPeer create a new downloader peer, with specific hash and block retrieval // newPeer create a new downloader peer, with specific hash and block retrieval
// mechanisms. // mechanisms.
func newPeer(id string, head common.Hash, getHashes hashFetcherFn, getBlocks blockFetcherFn) *peer { func newPeer(id string, version int, head common.Hash, getRelHashes relativeHashFetcherFn, getAbsHashes absoluteHashFetcherFn, getBlocks blockFetcherFn) *peer {
return &peer{ return &peer{
id: id, id: id,
head: head, head: head,
capacity: 1, capacity: 1,
getHashes: getHashes, getRelHashes: getRelHashes,
getBlocks: getBlocks, getAbsHashes: getAbsHashes,
ignored: set.New(), getBlocks: getBlocks,
ignored: set.New(),
version: version,
} }
} }

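The downloader peer now records the negotiated eth protocol version next to two hash-retrieval callbacks: a relative fetcher anchored on a hash (eth/60 style) and an absolute fetcher anchored on a block number (new in eth/61). A minimal sketch of how a caller might dispatch between the two strategies; the type and field names mirror the diff, while fetchHashes, the string-typed hashes and the batch size of 256 are illustrative assumptions, not the downloader's actual logic:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the two fetcher callbacks carried by a peer.
type relativeHashFetcherFn func(from string) error
type absoluteHashFetcherFn func(from uint64, count int) error

// versionedPeer mirrors the shape of the downloader peer: a protocol version
// plus one callback per retrieval strategy.
type versionedPeer struct {
	version      int
	getRelHashes relativeHashFetcherFn
	getAbsHashes absoluteHashFetcherFn
}

// fetchHashes dispatches on the negotiated protocol version: an eth/60 peer
// can only be asked for hashes relative to a known hash, an eth/61 peer can be
// asked for hashes starting at an absolute block number.
func (p *versionedPeer) fetchHashes(headHash string, headNumber uint64) error {
	switch {
	case p.version == 60:
		return p.getRelHashes(headHash)
	case p.version >= 61:
		return p.getAbsHashes(headNumber+1, 256)
	default:
		return errors.New("unsupported protocol version")
	}
}

func main() {
	p := &versionedPeer{
		version:      61,
		getRelHashes: func(from string) error { fmt.Println("relative fetch from", from); return nil },
		getAbsHashes: func(from uint64, count int) error { fmt.Printf("absolute fetch of %d hashes from #%d\n", count, from); return nil },
	}
	_ = p.fetchHashes("0xdeadbeef", 1024)
}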
@ -40,9 +40,9 @@ type queue struct {
pendPool map[string]*fetchRequest // Currently pending block retrieval operations pendPool map[string]*fetchRequest // Currently pending block retrieval operations
blockPool map[common.Hash]int // Hash-set of the downloaded data blocks, mapping to cache indexes blockPool map[common.Hash]uint64 // Hash-set of the downloaded data blocks, mapping to cache indexes
blockCache []*Block // Downloaded but not yet delivered blocks blockCache []*Block // Downloaded but not yet delivered blocks
blockOffset int // Offset of the first cached block in the block-chain blockOffset uint64 // Offset of the first cached block in the block-chain
lock sync.RWMutex lock sync.RWMutex
} }
@ -53,7 +53,7 @@ func newQueue() *queue {
hashPool: make(map[common.Hash]int), hashPool: make(map[common.Hash]int),
hashQueue: prque.New(), hashQueue: prque.New(),
pendPool: make(map[string]*fetchRequest), pendPool: make(map[string]*fetchRequest),
blockPool: make(map[common.Hash]int), blockPool: make(map[common.Hash]uint64),
blockCache: make([]*Block, blockCacheLimit), blockCache: make([]*Block, blockCacheLimit),
} }
} }
@ -69,7 +69,7 @@ func (q *queue) Reset() {
q.pendPool = make(map[string]*fetchRequest) q.pendPool = make(map[string]*fetchRequest)
q.blockPool = make(map[common.Hash]int) q.blockPool = make(map[common.Hash]uint64)
q.blockOffset = 0 q.blockOffset = 0
q.blockCache = make([]*Block, blockCacheLimit) q.blockCache = make([]*Block, blockCacheLimit)
} }
@ -130,7 +130,7 @@ func (q *queue) Has(hash common.Hash) bool {
// Insert adds a set of hashes for the download queue for scheduling, returning // Insert adds a set of hashes for the download queue for scheduling, returning
// the new hashes encountered. // the new hashes encountered.
func (q *queue) Insert(hashes []common.Hash) []common.Hash { func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
q.lock.Lock() q.lock.Lock()
defer q.lock.Unlock() defer q.lock.Unlock()
@ -147,7 +147,11 @@ func (q *queue) Insert(hashes []common.Hash) []common.Hash {
inserts = append(inserts, hash) inserts = append(inserts, hash)
q.hashPool[hash] = q.hashCounter q.hashPool[hash] = q.hashCounter
q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets scheduled first if fifo {
q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets scheduled first
} else {
q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets scheduled first
}
} }
return inserts return inserts
} }
@ -175,7 +179,7 @@ func (q *queue) GetBlock(hash common.Hash) *Block {
return nil return nil
} }
// Return the block if it's still available in the cache // Return the block if it's still available in the cache
if q.blockOffset <= index && index < q.blockOffset+len(q.blockCache) { if q.blockOffset <= index && index < q.blockOffset+uint64(len(q.blockCache)) {
return q.blockCache[index-q.blockOffset] return q.blockCache[index-q.blockOffset]
} }
return nil return nil
@ -202,7 +206,7 @@ func (q *queue) TakeBlocks() []*Block {
for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ { for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ {
q.blockCache[k] = nil q.blockCache[k] = nil
} }
q.blockOffset += len(blocks) q.blockOffset += uint64(len(blocks))
return blocks return blocks
} }
@ -318,7 +322,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
continue continue
} }
// If a requested block falls out of the range, the hash chain is invalid // If a requested block falls out of the range, the hash chain is invalid
index := int(block.NumberU64()) - q.blockOffset index := int(int64(block.NumberU64()) - int64(q.blockOffset))
if index >= len(q.blockCache) || index < 0 { if index >= len(q.blockCache) || index < 0 {
return errInvalidChain return errInvalidChain
} }
@ -329,7 +333,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
} }
delete(request.Hashes, hash) delete(request.Hashes, hash)
delete(q.hashPool, hash) delete(q.hashPool, hash)
q.blockPool[hash] = int(block.NumberU64()) q.blockPool[hash] = block.NumberU64()
} }
// Return all failed or missing fetches to the queue // Return all failed or missing fetches to the queue
for hash, index := range request.Hashes { for hash, index := range request.Hashes {
@ -346,7 +350,7 @@ func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
} }
// Prepare configures the block cache offset to allow accepting inbound blocks. // Prepare configures the block cache offset to allow accepting inbound blocks.
func (q *queue) Prepare(offset int) { func (q *queue) Prepare(offset uint64) {
q.lock.Lock() q.lock.Lock()
defer q.lock.Unlock() defer q.lock.Unlock()

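The queue's Insert method gains a fifo flag: hashes are normally scheduled newest-first (the highest counter wins), but negating the counter flips the priority queue into first-in-first-out order, which the number-based eth/61 synchronisation relies on. A self-contained sketch of that trick using container/heap in place of the prque package; item, insert and drain are illustrative names:

package main

import (
	"container/heap"
	"fmt"
)

// item pairs a hash with the priority it was pushed at; prioQueue always pops
// the highest priority first, mimicking the behaviour assumed of the hash queue.
type item struct {
	hash string
	prio float32
}

type prioQueue []item

func (q prioQueue) Len() int            { return len(q) }
func (q prioQueue) Less(i, j int) bool  { return q[i].prio > q[j].prio } // highest first
func (q prioQueue) Swap(i, j int)       { q[i], q[j] = q[j], q[i] }
func (q *prioQueue) Push(x interface{}) { *q = append(*q, x.(item)) }
func (q *prioQueue) Pop() interface{} {
	old := *q
	n := len(old)
	it := old[n-1]
	*q = old[:n-1]
	return it
}

// insert mimics queue.Insert: with fifo=false later hashes (higher counter)
// are scheduled first, with fifo=true the counter is negated so that earlier
// hashes come out first.
func insert(q *prioQueue, hashes []string, counter *int, fifo bool) {
	for _, hash := range hashes {
		*counter++
		prio := float32(*counter)
		if fifo {
			prio = -prio
		}
		heap.Push(q, item{hash, prio})
	}
}

func drain(q *prioQueue) []string {
	var out []string
	for q.Len() > 0 {
		out = append(out, heap.Pop(q).(item).hash)
	}
	return out
}

func main() {
	counter := 0
	lifo := &prioQueue{}
	insert(lifo, []string{"a", "b", "c"}, &counter, false)
	fmt.Println(drain(lifo)) // [c b a] - newest hash scheduled first

	counter = 0
	fifo := &prioQueue{}
	insert(fifo, []string{"a", "b", "c"}, &counter, true)
	fmt.Println(drain(fifo)) // [a b c] - oldest hash scheduled first
}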
@ -49,7 +49,7 @@ type ProtocolManager struct {
fetcher *fetcher.Fetcher fetcher *fetcher.Fetcher
peers *peerSet peers *peerSet
SubProtocol p2p.Protocol SubProtocols []p2p.Protocol
eventMux *event.TypeMux eventMux *event.TypeMux
txSub event.Subscription txSub event.Subscription
@ -68,8 +68,8 @@ type ProtocolManager struct {
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network. // with the ethereum network.
func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager) *ProtocolManager { func NewProtocolManager(networkId int, mux *event.TypeMux, txpool txPool, pow pow.PoW, chainman *core.ChainManager) *ProtocolManager {
// Create the protocol manager and initialize peer handlers // Create the protocol manager with the base fields
manager := &ProtocolManager{ manager := &ProtocolManager{
eventMux: mux, eventMux: mux,
txpool: txpool, txpool: txpool,
@ -79,18 +79,24 @@ func NewProtocolManager(protocolVersion, networkId int, mux *event.TypeMux, txpo
txsyncCh: make(chan *txsync), txsyncCh: make(chan *txsync),
quitSync: make(chan struct{}), quitSync: make(chan struct{}),
} }
manager.SubProtocol = p2p.Protocol{ // Initiate a sub-protocol for every implemented version we can handle
Name: "eth", manager.SubProtocols = make([]p2p.Protocol, len(ProtocolVersions))
Version: uint(protocolVersion), for i := 0; i < len(manager.SubProtocols); i++ {
Length: ProtocolLength, version := ProtocolVersions[i]
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(protocolVersion, networkId, p, rw) manager.SubProtocols[i] = p2p.Protocol{
manager.newPeerCh <- peer Name: "eth",
return manager.handle(peer) Version: version,
}, Length: ProtocolLengths[i],
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
peer := manager.newPeer(int(version), networkId, p, rw)
manager.newPeerCh <- peer
return manager.handle(peer)
},
}
} }
// Construct the different synchronisation mechanisms // Construct the different synchronisation mechanisms
manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.InsertChain, manager.removePeer) manager.downloader = downloader.New(manager.eventMux, manager.chainman.HasBlock, manager.chainman.GetBlock, manager.chainman.CurrentBlock, manager.chainman.InsertChain, manager.removePeer)
validator := func(block *types.Block, parent *types.Block) error { validator := func(block *types.Block, parent *types.Block) error {
return core.ValidateHeader(pow, block.Header(), parent, true) return core.ValidateHeader(pow, block.Header(), parent, true)
@ -152,31 +158,32 @@ func (pm *ProtocolManager) Stop() {
} }
func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func (pm *ProtocolManager) newPeer(pv, nv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
td, current, genesis := pm.chainman.Status() return newPeer(pv, nv, p, rw)
return newPeer(pv, nv, genesis, current, td, p, rw)
} }
// handle is the callback invoked to manage the life cycle of an eth peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error { func (pm *ProtocolManager) handle(p *peer) error {
// Execute the Ethereum handshake. glog.V(logger.Debug).Infof("%v: peer connected [%s]", p, p.Name())
if err := p.handleStatus(); err != nil {
// Execute the Ethereum handshake
td, head, genesis := pm.chainman.Status()
if err := p.Handshake(td, head, genesis); err != nil {
glog.V(logger.Debug).Infof("%v: handshake failed: %v", p, err)
return err return err
} }
// Register the peer locally
// Register the peer locally. glog.V(logger.Detail).Infof("%v: adding peer", p)
glog.V(logger.Detail).Infoln("Adding peer", p.id)
if err := pm.peers.Register(p); err != nil { if err := pm.peers.Register(p); err != nil {
glog.V(logger.Error).Infoln("Addition failed:", err) glog.V(logger.Error).Infof("%v: addition failed: %v", p, err)
return err return err
} }
defer pm.removePeer(p.id) defer pm.removePeer(p.id)
// Register the peer in the downloader. If the downloader // Register the peer in the downloader. If the downloader considers it banned, we disconnect
// considers it banned, we disconnect. if err := pm.downloader.RegisterPeer(p.id, p.version, p.Head(), p.RequestHashes, p.RequestHashesFromNumber, p.RequestBlocks); err != nil {
if err := pm.downloader.RegisterPeer(p.id, p.Head(), p.requestHashes, p.requestBlocks); err != nil {
return err return err
} }
// Propagate existing transactions. New transactions appearing // after this will be sent via broadcasts.
// after this will be sent via broadcasts. // after this will be sent via broadcasts.
pm.syncTransactions(p) pm.syncTransactions(p)
@ -184,13 +191,17 @@ func (pm *ProtocolManager) handle(p *peer) error {
// main loop. handle incoming messages. // main loop. handle incoming messages.
for { for {
if err := pm.handleMsg(p); err != nil { if err := pm.handleMsg(p); err != nil {
glog.V(logger.Debug).Infof("%v: message handling failed: %v", p, err)
return err return err
} }
} }
return nil return nil
} }
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error { func (pm *ProtocolManager) handleMsg(p *peer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg() msg, err := p.rw.ReadMsg()
if err != nil { if err != nil {
return err return err
@ -198,58 +209,69 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if msg.Size > ProtocolMaxMsgSize { if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
} }
// make sure that the payload has been fully consumed
defer msg.Discard() defer msg.Discard()
// Handle the message depending on its contents
switch msg.Code { switch msg.Code {
case StatusMsg: case StatusMsg:
// Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message") return errResp(ErrExtraStatusMsg, "uncontrolled status message")
case TxMsg:
// TODO: rework using lazy RLP stream
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
for i, tx := range txs {
if tx == nil {
return errResp(ErrDecode, "transaction %d is nil", i)
}
jsonlogger.LogJson(&logger.EthTxReceived{
TxHash: tx.Hash().Hex(),
RemoteId: p.ID().String(),
})
}
pm.txpool.AddTransactions(txs)
case GetBlockHashesMsg: case GetBlockHashesMsg:
var request getBlockHashesMsgData // Retrieve the number of hashes to return and from which origin hash
var request getBlockHashesData
if err := msg.Decode(&request); err != nil { if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "->msg %v: %v", msg, err) return errResp(ErrDecode, "%v: %v", msg, err)
} }
if request.Amount > uint64(downloader.MaxHashFetch) { if request.Amount > uint64(downloader.MaxHashFetch) {
request.Amount = uint64(downloader.MaxHashFetch) request.Amount = uint64(downloader.MaxHashFetch)
} }
// Retrieve the hashes from the block chain and return them
hashes := pm.chainman.GetBlockHashesFromHash(request.Hash, request.Amount) hashes := pm.chainman.GetBlockHashesFromHash(request.Hash, request.Amount)
if len(hashes) == 0 {
glog.V(logger.Debug).Infof("invalid block hash %x", request.Hash.Bytes()[:4])
}
return p.SendBlockHashes(hashes)
if glog.V(logger.Debug) { case GetBlockHashesFromNumberMsg:
if len(hashes) == 0 { // Retrieve and decode the number of hashes to return and from which origin number
glog.Infof("invalid block hash %x", request.Hash.Bytes()[:4]) var request getBlockHashesFromNumberData
} if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
if request.Amount > uint64(downloader.MaxHashFetch) {
request.Amount = uint64(downloader.MaxHashFetch)
} }
// Calculate the last block that should be retrieved, and short circuit if unavailable
last := pm.chainman.GetBlockByNumber(request.Number + request.Amount - 1)
if last == nil {
last = pm.chainman.CurrentBlock()
request.Amount = last.NumberU64() - request.Number + 1
}
if last.NumberU64() < request.Number {
return p.SendBlockHashes(nil)
}
// Retrieve the hashes from the last block backwards, reverse and return
hashes := []common.Hash{last.Hash()}
hashes = append(hashes, pm.chainman.GetBlockHashesFromHash(last.Hash(), request.Amount-1)...)
// returns either requested hashes or nothing (i.e. not found) for i := 0; i < len(hashes)/2; i++ {
return p.sendBlockHashes(hashes) hashes[i], hashes[len(hashes)-1-i] = hashes[len(hashes)-1-i], hashes[i]
}
return p.SendBlockHashes(hashes)
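The new GetBlockHashesFromNumberMsg case above serves number-based requests by clamping the request to the local head, resolving the last block in the window, walking the chain backwards (GetBlockHashesFromHash returns hashes towards the genesis block) and reversing the result so the requester receives ascending order. A toy, self-contained version of that flow; the chain, hashOf and blockHashesFromNumber are illustrative stand-ins, not the handler's API:

package main

import "fmt"

// A toy chain: block number -> hash. currentHead is the highest block we have.
var currentHead = uint64(8)

func hashOf(number uint64) string { return fmt.Sprintf("hash-%d", number) }

// blockHashesFromNumber mirrors the serving logic above: clamp the request to
// the local head, walk backwards from the last requested block and reverse the
// result so the requester receives hashes in ascending order.
func blockHashesFromNumber(from, amount uint64) []string {
	last := from + amount - 1
	if last > currentHead { // requested window reaches past our head: clamp it
		last = currentHead
		amount = last - from + 1
	}
	if last < from { // nothing to serve at all
		return nil
	}
	hashes := make([]string, 0, amount)
	for n := last; ; n-- { // backward walk, as the hash-based lookup would do
		hashes = append(hashes, hashOf(n))
		if n == from {
			break
		}
	}
	for i := 0; i < len(hashes)/2; i++ { // reverse into ascending order
		hashes[i], hashes[len(hashes)-1-i] = hashes[len(hashes)-1-i], hashes[i]
	}
	return hashes
}

func main() {
	fmt.Println(blockHashesFromNumber(5, 10)) // clamped at head: hash-5 ... hash-8
	fmt.Println(blockHashesFromNumber(9, 3))  // entirely beyond the head: empty
}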
case BlockHashesMsg: case BlockHashesMsg:
// A batch of hashes arrived to one of our previous requests
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqHashInPacketsMeter.Mark(1)
var hashes []common.Hash var hashes []common.Hash
if err := msgStream.Decode(&hashes); err != nil { if err := msgStream.Decode(&hashes); err != nil {
break break
} }
reqHashInTrafficMeter.Mark(int64(32 * len(hashes)))
// Deliver them all to the downloader for queuing
err := pm.downloader.DeliverHashes(p.id, hashes) err := pm.downloader.DeliverHashes(p.id, hashes)
if err != nil { if err != nil {
glog.V(logger.Debug).Infoln(err) glog.V(logger.Debug).Infoln(err)
@ -293,13 +315,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
list = list[:len(list)-2] + "]" list = list[:len(list)-2] + "]"
glog.Infof("Peer %s: no blocks found for requested hashes %s", p.id, list) glog.Infof("%v: no blocks found for requested hashes %s", p, list)
} }
return p.sendBlocks(blocks) return p.SendBlocks(blocks)
case BlocksMsg: case BlocksMsg:
// Decode the arrived block message // Decode the arrived block message
msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
reqBlockInPacketsMeter.Mark(1)
var blocks []*types.Block var blocks []*types.Block
if err := msgStream.Decode(&blocks); err != nil { if err := msgStream.Decode(&blocks); err != nil {
@ -307,8 +330,9 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
blocks = nil blocks = nil
} }
// Update the receive timestamp of each block // Update the receive timestamp of each block
for i := 0; i < len(blocks); i++ { for _, block := range blocks {
blocks[i].ReceivedAt = msg.ReceivedAt reqBlockInTrafficMeter.Mark(block.Size().Int64())
block.ReceivedAt = msg.ReceivedAt
} }
// Filter out any explicitly requested blocks, deliver the rest to the downloader // Filter out any explicitly requested blocks, deliver the rest to the downloader
if blocks := pm.fetcher.Filter(blocks); len(blocks) > 0 { if blocks := pm.fetcher.Filter(blocks); len(blocks) > 0 {
@ -323,9 +347,12 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msgStream.Decode(&hashes); err != nil { if err := msgStream.Decode(&hashes); err != nil {
break break
} }
propHashInPacketsMeter.Mark(1)
propHashInTrafficMeter.Mark(int64(32 * len(hashes)))
// Mark the hashes as present at the remote node // Mark the hashes as present at the remote node
for _, hash := range hashes { for _, hash := range hashes {
p.blockHashes.Add(hash) p.MarkBlock(hash)
p.SetHead(hash) p.SetHead(hash)
} }
// Schedule all the unknown hashes for retrieval // Schedule all the unknown hashes for retrieval
@ -336,15 +363,18 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
} }
} }
for _, hash := range unknown { for _, hash := range unknown {
pm.fetcher.Notify(p.id, hash, time.Now(), p.requestBlocks) pm.fetcher.Notify(p.id, hash, time.Now(), p.RequestBlocks)
} }
case NewBlockMsg: case NewBlockMsg:
// Retrieve and decode the propagated block // Retrieve and decode the propagated block
var request newBlockMsgData var request newBlockData
if err := msg.Decode(&request); err != nil { if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err) return errResp(ErrDecode, "%v: %v", msg, err)
} }
propBlockInPacketsMeter.Mark(1)
propBlockInTrafficMeter.Mark(request.Block.Size().Int64())
if err := request.Block.ValidateFields(); err != nil { if err := request.Block.ValidateFields(); err != nil {
return errResp(ErrDecode, "block validation %v: %v", msg, err) return errResp(ErrDecode, "block validation %v: %v", msg, err)
} }
@ -360,7 +390,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
RemoteId: p.ID().String(), RemoteId: p.ID().String(),
}) })
// Mark the peer as owning the block and schedule it for import // Mark the peer as owning the block and schedule it for import
p.blockHashes.Add(request.Block.Hash()) p.MarkBlock(request.Block.Hash())
p.SetHead(request.Block.Hash()) p.SetHead(request.Block.Hash())
pm.fetcher.Enqueue(p.id, request.Block) pm.fetcher.Enqueue(p.id, request.Block)
@ -369,6 +399,29 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.SetTd(request.TD) p.SetTd(request.TD)
go pm.synchronise(p) go pm.synchronise(p)
case TxMsg:
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
propTxnInPacketsMeter.Mark(1)
for i, tx := range txs {
// Validate and mark the remote transaction
if tx == nil {
return errResp(ErrDecode, "transaction %d is nil", i)
}
p.MarkTransaction(tx.Hash())
// Log its arrival for later analysis
propTxnInTrafficMeter.Mark(tx.Size().Int64())
jsonlogger.LogJson(&logger.EthTxReceived{
TxHash: tx.Hash().Hex(),
RemoteId: p.ID().String(),
})
}
pm.txpool.AddTransactions(txs)
default: default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code) return errResp(ErrInvalidMsgCode, "%v", msg.Code)
} }
@ -385,28 +438,27 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
if propagate { if propagate {
transfer := peers[:int(math.Sqrt(float64(len(peers))))] transfer := peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range transfer { for _, peer := range transfer {
peer.sendNewBlock(block) peer.SendNewBlock(block)
} }
glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt)) glog.V(logger.Detail).Infof("propagated block %x to %d peers in %v", hash[:4], len(transfer), time.Since(block.ReceivedAt))
} }
// Otherwise if the block is indeed in our own chain, announce it // Otherwise if the block is indeed in our own chain, announce it
if pm.chainman.HasBlock(hash) { if pm.chainman.HasBlock(hash) {
for _, peer := range peers { for _, peer := range peers {
peer.sendNewBlockHashes([]common.Hash{hash}) peer.SendNewBlockHashes([]common.Hash{hash})
} }
glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt)) glog.V(logger.Detail).Infof("announced block %x to %d peers in %v", hash[:4], len(peers), time.Since(block.ReceivedAt))
} }
} }
// BroadcastTx will propagate the block to its connected peers. It will sort // BroadcastTx will propagate a transaction to all peers which are not known to
// out which peers do not contain the block in their block set and will do a // already have the given transaction.
// sqrt(peers) to determine the amount of peers we broadcast to.
func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) { func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
// Broadcast transaction to a batch of peers not knowing about it // Broadcast transaction to a batch of peers not knowing about it
peers := pm.peers.PeersWithoutTx(hash) peers := pm.peers.PeersWithoutTx(hash)
//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))] //FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range peers { for _, peer := range peers {
peer.sendTransaction(tx) peer.SendTransactions(types.Transactions{tx})
} }
glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers") glog.V(logger.Detail).Infoln("broadcast tx to", len(peers), "peers")
} }

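Because eth/60 and eth/61 must be served side by side, the handler now builds one p2p.Protocol entry per supported version, each with its own Run closure. Copying ProtocolVersions[i] into a loop-local version variable is what lets every closure remember the version it was built for. A self-contained sketch of that registration pattern with a simplified protocol struct; the copy was required before Go 1.22 made loop variables per-iteration, and remains the clearer choice:

package main

import "fmt"

// protocol is a simplified stand-in for p2p.Protocol: a version plus a Run
// callback that must remember which version it was created for.
type protocol struct {
	Version uint
	Run     func() string
}

var protocolVersions = []uint{61, 60} // first entry is the preferred version

func main() {
	subProtocols := make([]protocol, len(protocolVersions))
	for i := 0; i < len(subProtocols); i++ {
		// Copy the version into a loop-local variable so that each Run
		// closure captures its own value rather than the shared loop index.
		version := protocolVersions[i]

		subProtocols[i] = protocol{
			Version: version,
			Run: func() string {
				return fmt.Sprintf("running handler for eth/%d", version)
			},
		}
	}
	for _, proto := range subProtocols {
		fmt.Println(proto.Run()) // eth/61, then eth/60
	}
}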
@ -0,0 +1,28 @@
package eth
import (
"github.com/ethereum/go-ethereum/metrics"
)
var (
propTxnInPacketsMeter = metrics.NewMeter("eth/prop/txns/in/packets")
propTxnInTrafficMeter = metrics.NewMeter("eth/prop/txns/in/traffic")
propTxnOutPacketsMeter = metrics.NewMeter("eth/prop/txns/out/packets")
propTxnOutTrafficMeter = metrics.NewMeter("eth/prop/txns/out/traffic")
propHashInPacketsMeter = metrics.NewMeter("eth/prop/hashes/in/packets")
propHashInTrafficMeter = metrics.NewMeter("eth/prop/hashes/in/traffic")
propHashOutPacketsMeter = metrics.NewMeter("eth/prop/hashes/out/packets")
propHashOutTrafficMeter = metrics.NewMeter("eth/prop/hashes/out/traffic")
propBlockInPacketsMeter = metrics.NewMeter("eth/prop/blocks/in/packets")
propBlockInTrafficMeter = metrics.NewMeter("eth/prop/blocks/in/traffic")
propBlockOutPacketsMeter = metrics.NewMeter("eth/prop/blocks/out/packets")
propBlockOutTrafficMeter = metrics.NewMeter("eth/prop/blocks/out/traffic")
reqHashInPacketsMeter = metrics.NewMeter("eth/req/hashes/in/packets")
reqHashInTrafficMeter = metrics.NewMeter("eth/req/hashes/in/traffic")
reqHashOutPacketsMeter = metrics.NewMeter("eth/req/hashes/out/packets")
reqHashOutTrafficMeter = metrics.NewMeter("eth/req/hashes/out/traffic")
reqBlockInPacketsMeter = metrics.NewMeter("eth/req/blocks/in/packets")
reqBlockInTrafficMeter = metrics.NewMeter("eth/req/blocks/in/traffic")
reqBlockOutPacketsMeter = metrics.NewMeter("eth/req/blocks/out/packets")
reqBlockOutTrafficMeter = metrics.NewMeter("eth/req/blocks/out/traffic")
)

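The new eth/metrics.go file registers one packets meter and one traffic meter per message class and direction; throughout the handler and peer code, every send or receive marks the packets meter once and the traffic meter with the payload size. A minimal sketch of that convention with a stand-in meter (the real metrics.Meter also tracks rates); sendBlockHashes here is illustrative, not the peer method itself:

package main

import (
	"fmt"
	"sync/atomic"
)

// meter is a minimal stand-in for the metrics.Meter used above: it only keeps
// a running total, while the real meter also tracks moving rates.
type meter struct{ total int64 }

func (m *meter) Mark(n int64) { atomic.AddInt64(&m.total, n) }

var (
	reqHashOutPacketsMeter = &meter{}
	reqHashOutTrafficMeter = &meter{}
)

// sendBlockHashes shows the convention used throughout the handler: one Mark(1)
// on the packets meter per message, and a Mark(size) on the traffic meter with
// the payload size (32 bytes per hash here).
func sendBlockHashes(hashes []string) {
	reqHashOutPacketsMeter.Mark(1)
	reqHashOutTrafficMeter.Mark(int64(32 * len(hashes)))
	// ... the real method then hands the payload to p2p.Send ...
}

func main() {
	sendBlockHashes(make([]string, 3))
	fmt.Println("packets:", reqHashOutPacketsMeter.total, "bytes:", reqHashOutTrafficMeter.total)
}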
@ -20,25 +20,18 @@ var (
errNotRegistered = errors.New("peer is not registered") errNotRegistered = errors.New("peer is not registered")
) )
type statusMsgData struct { const (
ProtocolVersion uint32 maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
NetworkId uint32 maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS)
TD *big.Int )
CurrentBlock common.Hash
GenesisBlock common.Hash
}
type getBlockHashesMsgData struct {
Hash common.Hash
Amount uint64
}
type peer struct { type peer struct {
*p2p.Peer *p2p.Peer
rw p2p.MsgReadWriter rw p2p.MsgReadWriter
protv, netid int version int // Protocol version negotiated
network int // Network ID being on
id string id string
@ -46,27 +39,21 @@ type peer struct {
td *big.Int td *big.Int
lock sync.RWMutex lock sync.RWMutex
genesis, ourHash common.Hash knownTxs *set.Set // Set of transaction hashes known to be known by this peer
ourTd *big.Int knownBlocks *set.Set // Set of block hashes known to be known by this peer
txHashes *set.Set
blockHashes *set.Set
} }
func newPeer(protv, netid int, genesis, head common.Hash, td *big.Int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { func newPeer(version, network int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID() id := p.ID()
return &peer{ return &peer{
Peer: p, Peer: p,
rw: rw, rw: rw,
genesis: genesis, version: version,
ourHash: head, network: network,
ourTd: td,
protv: protv,
netid: netid,
id: fmt.Sprintf("%x", id[:8]), id: fmt.Sprintf("%x", id[:8]),
txHashes: set.New(), knownTxs: set.New(),
blockHashes: set.New(), knownBlocks: set.New(),
} }
} }
@ -103,68 +90,110 @@ func (p *peer) SetTd(td *big.Int) {
p.td.Set(td) p.td.Set(td)
} }
// sendTransactions sends transactions to the peer and includes the hashes // MarkBlock marks a block as known for the peer, ensuring that the block will
// in its tx hash set for future reference. The tx hash will allow the // never be propagated to this particular peer.
// manager to check whether the peer has already received this particular func (p *peer) MarkBlock(hash common.Hash) {
// transaction // If we reached the memory allowance, drop a previously known block hash
func (p *peer) sendTransactions(txs types.Transactions) error { for p.knownBlocks.Size() >= maxKnownBlocks {
for _, tx := range txs { p.knownBlocks.Pop()
p.txHashes.Add(tx.Hash())
} }
p.knownBlocks.Add(hash)
}
// MarkTransaction marks a transaction as known for the peer, ensuring that it
// will never be propagated to this particular peer.
func (p *peer) MarkTransaction(hash common.Hash) {
// If we reached the memory allowance, drop a previously known transaction hash
for p.knownTxs.Size() >= maxKnownTxs {
p.knownTxs.Pop()
}
p.knownTxs.Add(hash)
}
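MarkBlock and MarkTransaction above cap the per-peer known sets at maxKnownBlocks and maxKnownTxs, evicting arbitrary old entries before inserting, so a misbehaving peer cannot grow them without bound. A self-contained sketch of that bounded-set behaviour using a plain map in place of gopkg.in/fatih/set.v0; boundedSet and Mark are illustrative names:

package main

import "fmt"

// boundedSet is a tiny stand-in for the set used above: it keeps at most max
// entries and evicts an arbitrary one when full, which is all that
// MarkBlock/MarkTransaction rely on to cap memory usage per peer.
type boundedSet struct {
	max   int
	items map[string]struct{}
}

func newBoundedSet(max int) *boundedSet {
	return &boundedSet{max: max, items: make(map[string]struct{})}
}

// Mark records a hash as known, evicting arbitrary older entries first if the
// allowance has been reached (mirroring the Pop-until-below-cap loop above).
func (s *boundedSet) Mark(hash string) {
	for len(s.items) >= s.max {
		for victim := range s.items { // map iteration order is unspecified:
			delete(s.items, victim) // an arbitrary entry gets dropped
			break
		}
	}
	s.items[hash] = struct{}{}
}

func (s *boundedSet) Has(hash string) bool {
	_, ok := s.items[hash]
	return ok
}

func main() {
	knownBlocks := newBoundedSet(2)
	knownBlocks.Mark("block-1")
	knownBlocks.Mark("block-2")
	knownBlocks.Mark("block-3") // forces one eviction
	fmt.Println(len(knownBlocks.items), knownBlocks.Has("block-3")) // 2 true
}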
// SendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
func (p *peer) SendTransactions(txs types.Transactions) error {
propTxnOutPacketsMeter.Mark(1)
for _, tx := range txs {
propTxnOutTrafficMeter.Mark(tx.Size().Int64())
p.knownTxs.Add(tx.Hash())
}
return p2p.Send(p.rw, TxMsg, txs) return p2p.Send(p.rw, TxMsg, txs)
} }
func (p *peer) sendBlockHashes(hashes []common.Hash) error { // SendBlockHashes sends a batch of known hashes to the remote peer.
func (p *peer) SendBlockHashes(hashes []common.Hash) error {
reqHashOutPacketsMeter.Mark(1)
reqHashOutTrafficMeter.Mark(int64(32 * len(hashes)))
return p2p.Send(p.rw, BlockHashesMsg, hashes) return p2p.Send(p.rw, BlockHashesMsg, hashes)
} }
func (p *peer) sendBlocks(blocks []*types.Block) error { // SendBlocks sends a batch of blocks to the remote peer.
func (p *peer) SendBlocks(blocks []*types.Block) error {
reqBlockOutPacketsMeter.Mark(1)
for _, block := range blocks {
reqBlockOutTrafficMeter.Mark(block.Size().Int64())
}
return p2p.Send(p.rw, BlocksMsg, blocks) return p2p.Send(p.rw, BlocksMsg, blocks)
} }
func (p *peer) sendNewBlockHashes(hashes []common.Hash) error { // SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash) error {
propHashOutPacketsMeter.Mark(1)
propHashOutTrafficMeter.Mark(int64(32 * len(hashes)))
for _, hash := range hashes { for _, hash := range hashes {
p.blockHashes.Add(hash) p.knownBlocks.Add(hash)
} }
return p2p.Send(p.rw, NewBlockHashesMsg, hashes) return p2p.Send(p.rw, NewBlockHashesMsg, hashes)
} }
func (p *peer) sendNewBlock(block *types.Block) error { // SendNewBlock propagates an entire block to a remote peer.
p.blockHashes.Add(block.Hash()) func (p *peer) SendNewBlock(block *types.Block) error {
propBlockOutPacketsMeter.Mark(1)
propBlockOutTrafficMeter.Mark(block.Size().Int64())
p.knownBlocks.Add(block.Hash())
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, block.Td}) return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, block.Td})
} }
func (p *peer) sendTransaction(tx *types.Transaction) error { // RequestHashes fetches a batch of hashes from a peer, starting at from, going
p.txHashes.Add(tx.Hash()) // towards the genesis block.
func (p *peer) RequestHashes(from common.Hash) error {
return p2p.Send(p.rw, TxMsg, []*types.Transaction{tx}) glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from %x...\n", p.id, downloader.MaxHashFetch, from[:4])
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesData{from, uint64(downloader.MaxHashFetch)})
} }
func (p *peer) requestHashes(from common.Hash) error { // RequestHashesFromNumber fetches a batch of hashes from a peer, starting at the
glog.V(logger.Debug).Infof("[%s] fetching hashes (%d) %x...\n", p.id, downloader.MaxHashFetch, from[:4]) // requested block number, going upwards (away from the genesis block).
return p2p.Send(p.rw, GetBlockHashesMsg, getBlockHashesMsgData{from, uint64(downloader.MaxHashFetch)}) func (p *peer) RequestHashesFromNumber(from uint64, count int) error {
glog.V(logger.Debug).Infof("Peer [%s] fetching hashes (%d) from #%d...\n", p.id, count, from)
return p2p.Send(p.rw, GetBlockHashesFromNumberMsg, getBlockHashesFromNumberData{from, uint64(count)})
} }
func (p *peer) requestBlocks(hashes []common.Hash) error { // RequestBlocks fetches a batch of blocks corresponding to the specified hashes.
func (p *peer) RequestBlocks(hashes []common.Hash) error {
glog.V(logger.Debug).Infof("[%s] fetching %v blocks\n", p.id, len(hashes)) glog.V(logger.Debug).Infof("[%s] fetching %v blocks\n", p.id, len(hashes))
return p2p.Send(p.rw, GetBlocksMsg, hashes) return p2p.Send(p.rw, GetBlocksMsg, hashes)
} }
func (p *peer) handleStatus() error { // Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, genesis common.Hash) error {
// Send out own handshake in a new thread
errc := make(chan error, 1) errc := make(chan error, 1)
go func() { go func() {
errc <- p2p.Send(p.rw, StatusMsg, &statusMsgData{ errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.protv), ProtocolVersion: uint32(p.version),
NetworkId: uint32(p.netid), NetworkId: uint32(p.network),
TD: p.ourTd, TD: td,
CurrentBlock: p.ourHash, CurrentBlock: head,
GenesisBlock: p.genesis, GenesisBlock: genesis,
}) })
}() }()
// In the mean time retrieve the remote status message
// read and handle remote status
msg, err := p.rw.ReadMsg() msg, err := p.rw.ReadMsg()
if err != nil { if err != nil {
return err return err
@ -175,31 +204,32 @@ func (p *peer) handleStatus() error {
if msg.Size > ProtocolMaxMsgSize { if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize) return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
} }
// Decode the handshake and make sure everything matches
var status statusMsgData var status statusData
if err := msg.Decode(&status); err != nil { if err := msg.Decode(&status); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err) return errResp(ErrDecode, "msg %v: %v", msg, err)
} }
if status.GenesisBlock != genesis {
if status.GenesisBlock != p.genesis { return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, genesis)
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", status.GenesisBlock, p.genesis)
} }
if int(status.NetworkId) != p.network {
if int(status.NetworkId) != p.netid { return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, p.network)
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", status.NetworkId, p.netid)
} }
if int(status.ProtocolVersion) != p.version {
if int(status.ProtocolVersion) != p.protv { return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version)
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.protv)
} }
// Set the total difficulty of the peer // Configure the remote peer, and sanity check our handshake too
p.td = status.TD p.td, p.head = status.TD, status.CurrentBlock
// set the best hash of the peer
p.head = status.CurrentBlock
return <-errc return <-errc
} }
// String implements fmt.Stringer.
func (p *peer) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("eth/%2d", p.version),
)
}
// peerSet represents the collection of active peers currently participating in // peerSet represents the collection of active peers currently participating in
// the Ethereum sub-protocol. // the Ethereum sub-protocol.
type peerSet struct { type peerSet struct {
@ -264,7 +294,7 @@ func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer {
list := make([]*peer, 0, len(ps.peers)) list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers { for _, p := range ps.peers {
if !p.blockHashes.Has(hash) { if !p.knownBlocks.Has(hash) {
list = append(list, p) list = append(list, p)
} }
} }
@ -279,7 +309,7 @@ func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer {
list := make([]*peer, 0, len(ps.peers)) list := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers { for _, p := range ps.peers {
if !p.txHashes.Has(hash) { if !p.knownTxs.Has(hash) {
list = append(list, p) list = append(list, p)
} }
} }

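The Handshake method above sends our own status from a goroutine while reading and validating the remote status on the calling path, and only collects the send error at the very end. A self-contained sketch of that pattern with a simplified status packet; send and recv stand in for the p2p message plumbing and are not part of the real API:

package main

import (
	"errors"
	"fmt"
)

// status is a simplified stand-in for the statusData packet exchanged above.
type status struct {
	Version uint32
	Network uint32
	Genesis string
}

// handshake mirrors the structure of peer.Handshake: the local status is sent
// from a goroutine while the remote status is read and validated on the main
// path, and the send error is only surfaced at the end.
func handshake(local status, send func(status) error, recv func() (status, error)) error {
	errc := make(chan error, 1)
	go func() { errc <- send(local) }() // send our own status concurrently

	remote, err := recv() // meanwhile read the remote status
	if err != nil {
		return err
	}
	switch {
	case remote.Genesis != local.Genesis:
		return errors.New("genesis block mismatch")
	case remote.Network != local.Network:
		return errors.New("network id mismatch")
	case remote.Version != local.Version:
		return errors.New("protocol version mismatch")
	}
	return <-errc // finally collect any error from our own send
}

func main() {
	local := status{Version: 61, Network: 0, Genesis: "gen"}
	err := handshake(local,
		func(status) error { return nil },
		func() (status, error) { return status{61, 0, "gen"}, nil },
	)
	fmt.Println("handshake error:", err) // <nil>
}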
@ -7,11 +7,15 @@ import (
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
) )
// Supported versions of the eth protocol (first is primary).
var ProtocolVersions = []uint{61, 60}
// Number of implemented messages corresponding to the different protocol versions.
var ProtocolLengths = []uint64{9, 8}
const ( const (
ProtocolVersion = 60
NetworkId = 0 NetworkId = 0
ProtocolLength = uint64(8) ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
ProtocolMaxMsgSize = 10 * 1024 * 1024
) )
// eth protocol message codes // eth protocol message codes
@ -24,6 +28,7 @@ const (
GetBlocksMsg GetBlocksMsg
BlocksMsg BlocksMsg
NewBlockMsg NewBlockMsg
GetBlockHashesFromNumberMsg
) )
type errCode int type errCode int
@ -72,8 +77,31 @@ type chainManager interface {
Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash) Status() (td *big.Int, currentBlock common.Hash, genesisBlock common.Hash)
} }
// message structs used for RLP serialization // statusData is the network packet for the status message.
type newBlockMsgData struct { type statusData struct {
ProtocolVersion uint32
NetworkId uint32
TD *big.Int
CurrentBlock common.Hash
GenesisBlock common.Hash
}
// getBlockHashesData is the network packet for the hash based block retrieval
// message.
type getBlockHashesData struct {
Hash common.Hash
Amount uint64
}
// getBlockHashesFromNumberData is the network packet for the number based block
// retrieval message.
type getBlockHashesFromNumberData struct {
Number uint64
Amount uint64
}
// newBlockData is the network packet for the block propagation message.
type newBlockData struct {
Block *types.Block Block *types.Block
TD *big.Int TD *big.Int
} }

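protocol.go now advertises two versions, preferred first, and pairs them with per-version message counts: eth/61 understands 9 codes because GetBlockHashesFromNumberMsg is appended after NewBlockMsg, while eth/60 keeps its original 8. A small sketch of how those two slices bound the legal message codes per version; supportsMsg and the hard-coded value 8 are illustrative, and the codes are assumed to be dense starting at 0 (StatusMsg), as the tests above imply:

package main

import "fmt"

var (
	ProtocolVersions = []uint{61, 60} // supported versions, preferred first
	ProtocolLengths  = []uint64{9, 8} // number of message codes per version
)

// supportsMsg reports whether a peer negotiated to the given version may
// legally send the given message code: codes start at 0, so the per-version
// length acts as an exclusive upper bound.
func supportsMsg(version uint, code uint64) bool {
	for i, v := range ProtocolVersions {
		if v == version {
			return code < ProtocolLengths[i]
		}
	}
	return false
}

func main() {
	const GetBlockHashesFromNumberMsg = 8 // the code appended for eth/61
	fmt.Println("eth/61:", supportsMsg(61, GetBlockHashesFromNumberMsg)) // true
	fmt.Println("eth/60:", supportsMsg(60, GetBlockHashesFromNumberMsg)) // false
}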
@ -39,15 +39,15 @@ func TestStatusMsgErrors(t *testing.T) {
wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
}, },
{ {
code: StatusMsg, data: statusMsgData{10, NetworkId, td, currentBlock, genesis}, code: StatusMsg, data: statusData{10, NetworkId, td, currentBlock, genesis},
wantError: errResp(ErrProtocolVersionMismatch, "10 (!= 0)"), wantError: errResp(ErrProtocolVersionMismatch, "10 (!= 0)"),
}, },
{ {
code: StatusMsg, data: statusMsgData{ProtocolVersion, 999, td, currentBlock, genesis}, code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), 999, td, currentBlock, genesis},
wantError: errResp(ErrNetworkIdMismatch, "999 (!= 0)"), wantError: errResp(ErrNetworkIdMismatch, "999 (!= 0)"),
}, },
{ {
code: StatusMsg, data: statusMsgData{ProtocolVersion, NetworkId, td, currentBlock, common.Hash{3}}, code: StatusMsg, data: statusData{uint32(ProtocolVersions[0]), NetworkId, td, currentBlock, common.Hash{3}},
wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis), wantError: errResp(ErrGenesisBlockMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis),
}, },
} }
@ -167,7 +167,7 @@ func newProtocolManagerForTesting(txAdded chan<- []*types.Transaction) *Protocol
db, _ = ethdb.NewMemDatabase() db, _ = ethdb.NewMemDatabase()
chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em) chain, _ = core.NewChainManager(core.GenesisBlock(0, db), db, db, core.FakePow{}, em)
txpool = &fakeTxPool{added: txAdded} txpool = &fakeTxPool{added: txAdded}
pm = NewProtocolManager(ProtocolVersion, 0, em, txpool, core.FakePow{}, chain) pm = NewProtocolManager(0, em, txpool, core.FakePow{}, chain)
) )
pm.Start() pm.Start()
return pm return pm
@ -188,7 +188,7 @@ func newTestPeer(pm *ProtocolManager) (*testPeer, <-chan error) {
func (p *testPeer) handshake(t *testing.T) { func (p *testPeer) handshake(t *testing.T) {
td, currentBlock, genesis := p.pm.chainman.Status() td, currentBlock, genesis := p.pm.chainman.Status()
msg := &statusMsgData{ msg := &statusData{
ProtocolVersion: uint32(p.pm.protVer), ProtocolVersion: uint32(p.pm.protVer),
NetworkId: uint32(p.pm.netId), NetworkId: uint32(p.pm.netId),
TD: td, TD: td,

@ -20,14 +20,6 @@ const (
txsyncPackSize = 100 * 1024 txsyncPackSize = 100 * 1024
) )
// blockAnnounce is the hash notification of the availability of a new block in
// the network.
type blockAnnounce struct {
hash common.Hash
peer *peer
time time.Time
}
type txsync struct { type txsync struct {
p *peer p *peer
txs []*types.Transaction txs []*types.Transaction
@ -75,7 +67,7 @@ func (pm *ProtocolManager) txsyncLoop() {
// Send the pack in the background. // Send the pack in the background.
glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size) glog.V(logger.Detail).Infof("%v: sending %d transactions (%v)", s.p.Peer, len(pack.txs), size)
sending = true sending = true
go func() { done <- pack.p.sendTransactions(pack.txs) }() go func() { done <- pack.p.SendTransactions(pack.txs) }()
} }
// pick chooses the next pending sync. // pick chooses the next pending sync.
