mirror of https://github.com/ethereum/go-ethereum
cmd/devp2p, p2p: dial using node iterator, discovery crawler (#20132)
* p2p/enode: add Iterator and associated utilities
* p2p/discover: add RandomNodes iterator
* p2p: dial using iterator
* cmd/devp2p: add discv4 crawler
* cmd/devp2p: WIP nodeset filter
* cmd/devp2p: fixup lesFilter
* core/forkid: add NewStaticFilter
* cmd/devp2p: make -eth-network filter actually work
* cmd/devp2p: improve crawl timestamp handling
* cmd/devp2p: fix typo
* p2p/enode: fix comment typos
* p2p/discover: fix comment typos
* p2p/discover: rename lookup.next to 'advance'
* p2p: lower discovery mixer timeout
* p2p/enode: implement dynamic FairMix timeouts
* cmd/devp2p: add ropsten support in -eth-network filter
* cmd/devp2p: tweak crawler log message
parent b0b277525c
commit 2c37142d2f
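The pieces below fit together roughly like this: p2p now draws dial candidates from an enode.Iterator instead of querying the discovery table directly, and FairMix lets several candidate sources feed one consumer. A minimal sketch (not part of this commit) of that wiring, using only the APIs added in this diff; the package name, wireDialer function, and dial callback are hypothetical stand-ins:

package p2psketch

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// wireDialer mixes several node sources and feeds the result to a dialer loop.
// staticNodes and the dial function are placeholders for illustration only.
func wireDialer(staticNodes []*enode.Node, discovered enode.Iterator, dial func(*enode.Node) error) enode.Iterator {
	mix := enode.NewFairMix(100 * time.Millisecond) // wait ~100ms for the "fair" source before falling back
	mix.AddSource(enode.CycleNodes(staticNodes))    // static/bootstrap nodes, repeated forever
	mix.AddSource(discovered)                       // e.g. the RandomNodes iterator mentioned in the commit message
	go func() {
		for mix.Next() { // blocks until some source produces a node or the mix is closed
			if err := dial(mix.Node()); err != nil {
				fmt.Println("dial failed:", err)
			}
		}
	}()
	return mix // the caller must Close() it to stop the loop and the sources
}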
@@ -0,0 +1,152 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

type crawler struct {
	input     nodeSet
	output    nodeSet
	disc      *discover.UDPv4
	iters     []enode.Iterator
	inputIter enode.Iterator
	ch        chan *enode.Node
	closed    chan struct{}

	// settings
	revalidateInterval time.Duration
}

func newCrawler(input nodeSet, disc *discover.UDPv4, iters ...enode.Iterator) *crawler {
	c := &crawler{
		input:     input,
		output:    make(nodeSet, len(input)),
		disc:      disc,
		iters:     iters,
		inputIter: enode.IterNodes(input.nodes()),
		ch:        make(chan *enode.Node),
		closed:    make(chan struct{}),
	}
	c.iters = append(c.iters, c.inputIter)
	// Copy input to output initially. Any nodes that fail validation
	// will be dropped from output during the run.
	for id, n := range input {
		c.output[id] = n
	}
	return c
}

func (c *crawler) run(timeout time.Duration) nodeSet {
	var (
		timeoutTimer = time.NewTimer(timeout)
		timeoutCh    <-chan time.Time
		doneCh       = make(chan enode.Iterator, len(c.iters))
		liveIters    = len(c.iters)
	)
	for _, it := range c.iters {
		go c.runIterator(doneCh, it)
	}

loop:
	for {
		select {
		case n := <-c.ch:
			c.updateNode(n)
		case it := <-doneCh:
			if it == c.inputIter {
				// Enable timeout when we're done revalidating the input nodes.
				log.Info("Revalidation of input set is done", "len", len(c.input))
				if timeout > 0 {
					timeoutCh = timeoutTimer.C
				}
			}
			if liveIters--; liveIters == 0 {
				break loop
			}
		case <-timeoutCh:
			break loop
		}
	}

	close(c.closed)
	for _, it := range c.iters {
		it.Close()
	}
	for ; liveIters > 0; liveIters-- {
		<-doneCh
	}
	return c.output
}

func (c *crawler) runIterator(done chan<- enode.Iterator, it enode.Iterator) {
	defer func() { done <- it }()
	for it.Next() {
		select {
		case c.ch <- it.Node():
		case <-c.closed:
			return
		}
	}
}

func (c *crawler) updateNode(n *enode.Node) {
	node, ok := c.output[n.ID()]

	// Skip validation of recently-seen nodes.
	if ok && time.Since(node.LastCheck) < c.revalidateInterval {
		return
	}

	// Request the node record.
	nn, err := c.disc.RequestENR(n)
	node.LastCheck = truncNow()
	if err != nil {
		if node.Score == 0 {
			// Node doesn't implement EIP-868.
			log.Debug("Skipping node", "id", n.ID())
			return
		}
		node.Score /= 2
	} else {
		node.N = nn
		node.Seq = nn.Seq()
		node.Score++
		if node.FirstResponse.IsZero() {
			node.FirstResponse = node.LastCheck
		}
		node.LastResponse = node.LastCheck
	}

	// Store/update node in output set.
	if node.Score <= 0 {
		log.Info("Removing node", "id", n.ID())
		delete(c.output, n.ID())
	} else {
		log.Info("Updating node", "id", n.ID(), "seq", n.Seq(), "score", node.Score)
		c.output[n.ID()] = node
	}
}

func truncNow() time.Time {
	return time.Now().UTC().Truncate(1 * time.Second)
}
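For orientation, a minimal sketch (not part of this diff) of how the crawler above might be driven. It assumes the same package and its existing imports, an already-listening *discover.UDPv4, the loadNodesJSON/writeNodesJSON helpers referenced elsewhere in cmd/devp2p, and the RandomNodes iterator mentioned in the commit message; the file name and durations are arbitrary:

// crawlSketch is a hypothetical driver for the crawler type above.
func crawlSketch(disc *discover.UDPv4) {
	input := loadNodesJSON("nodes.json") // previously known nodes; may be empty on the first run
	c := newCrawler(input, disc, disc.RandomNodes())
	c.revalidateInterval = 10 * time.Minute
	// run revalidates the input set and then keeps crawling until the timeout fires.
	output := c.run(30 * time.Minute)
	writeNodesJSON("nodes.json", output)
}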
@@ -0,0 +1,193 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/core/forkid"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"gopkg.in/urfave/cli.v1"
)

var (
	nodesetCommand = cli.Command{
		Name:  "nodeset",
		Usage: "Node set tools",
		Subcommands: []cli.Command{
			nodesetInfoCommand,
			nodesetFilterCommand,
		},
	}
	nodesetInfoCommand = cli.Command{
		Name:      "info",
		Usage:     "Shows statistics about a node set",
		Action:    nodesetInfo,
		ArgsUsage: "<nodes.json>",
	}
	nodesetFilterCommand = cli.Command{
		Name:      "filter",
		Usage:     "Filters a node set",
		Action:    nodesetFilter,
		ArgsUsage: "<nodes.json> filters..",

		SkipFlagParsing: true,
	}
)

func nodesetInfo(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}

	ns := loadNodesJSON(ctx.Args().First())
	fmt.Printf("Set contains %d nodes.\n", len(ns))
	return nil
}

func nodesetFilter(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("need nodes file as argument")
	}
	ns := loadNodesJSON(ctx.Args().First())
	filter, err := andFilter(ctx.Args().Tail())
	if err != nil {
		return err
	}

	result := make(nodeSet)
	for id, n := range ns {
		if filter(n) {
			result[id] = n
		}
	}
	writeNodesJSON("-", result)
	return nil
}

type nodeFilter func(nodeJSON) bool

type nodeFilterC struct {
	narg int
	fn   func([]string) (nodeFilter, error)
}

var filterFlags = map[string]nodeFilterC{
	"-ip":          {1, ipFilter},
	"-min-age":     {1, minAgeFilter},
	"-eth-network": {1, ethFilter},
	"-les-server":  {0, lesFilter},
}

func parseFilters(args []string) ([]nodeFilter, error) {
	var filters []nodeFilter
	for len(args) > 0 {
		fc, ok := filterFlags[args[0]]
		if !ok {
			return nil, fmt.Errorf("invalid filter %q", args[0])
		}
		if len(args) < fc.narg {
			return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args))
		}
		filter, err := fc.fn(args[1:])
		if err != nil {
			return nil, fmt.Errorf("%s: %v", args[0], err)
		}
		filters = append(filters, filter)
		args = args[fc.narg+1:]
	}
	return filters, nil
}

func andFilter(args []string) (nodeFilter, error) {
	checks, err := parseFilters(args)
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		for _, filter := range checks {
			if !filter(n) {
				return false
			}
		}
		return true
	}
	return f, nil
}

func ipFilter(args []string) (nodeFilter, error) {
	_, cidr, err := net.ParseCIDR(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool { return cidr.Contains(n.N.IP()) }
	return f, nil
}

func minAgeFilter(args []string) (nodeFilter, error) {
	minage, err := time.ParseDuration(args[0])
	if err != nil {
		return nil, err
	}
	f := func(n nodeJSON) bool {
		age := n.LastResponse.Sub(n.FirstResponse)
		return age >= minage
	}
	return f, nil
}

func ethFilter(args []string) (nodeFilter, error) {
	var filter func(forkid.ID) error
	switch args[0] {
	case "mainnet":
		filter = forkid.NewStaticFilter(params.MainnetChainConfig, params.MainnetGenesisHash)
	case "rinkeby":
		filter = forkid.NewStaticFilter(params.RinkebyChainConfig, params.RinkebyGenesisHash)
	case "goerli":
		filter = forkid.NewStaticFilter(params.GoerliChainConfig, params.GoerliGenesisHash)
	case "ropsten":
		filter = forkid.NewStaticFilter(params.TestnetChainConfig, params.TestnetGenesisHash)
	default:
		return nil, fmt.Errorf("unknown network %q", args[0])
	}

	f := func(n nodeJSON) bool {
		var eth struct {
			ForkID forkid.ID
			_      []rlp.RawValue `rlp:"tail"`
		}
		if n.N.Load(enr.WithEntry("eth", &eth)) != nil {
			return false
		}
		return filter(eth.ForkID) == nil
	}
	return f, nil
}

func lesFilter(args []string) (nodeFilter, error) {
	f := func(n nodeJSON) bool {
		var les struct {
			_ []rlp.RawValue `rlp:"tail"`
		}
		return n.N.Load(enr.WithEntry("les", &les)) == nil
	}
	return f, nil
}
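The filter flags above compose through andFilter, so the same checks can also be used programmatically. A minimal sketch (not part of this diff, same package and imports assumed); the file name and filter arguments are just examples:

// filterSketch replays what nodesetFilter does, with hard-coded arguments.
func filterSketch() error {
	ns := loadNodesJSON("nodes.json")
	// Keep Görli nodes in 10.0.0.0/8 that kept responding for at least an hour.
	filter, err := andFilter([]string{"-ip", "10.0.0.0/8", "-min-age", "1h", "-eth-network", "goerli"})
	if err != nil {
		return err
	}
	result := make(nodeSet)
	for id, n := range ns {
		if filter(n) {
			result[id] = n
		}
	}
	writeNodesJSON("-", result) // "-" is the same destination nodesetFilter uses
	return nil
}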
@@ -0,0 +1,209 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package discover

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// lookup performs a network search for nodes close to the given target. It approaches the
// target by querying nodes that are closer to it on each iteration. The given target does
// not need to be an actual node identifier.
type lookup struct {
	tab         *Table
	queryfunc   func(*node) ([]*node, error)
	replyCh     chan []*node
	cancelCh    <-chan struct{}
	asked, seen map[enode.ID]bool
	result      nodesByDistance
	replyBuffer []*node
	queries     int
}

type queryFunc func(*node) ([]*node, error)

func newLookup(ctx context.Context, tab *Table, target enode.ID, q queryFunc) *lookup {
	it := &lookup{
		tab:       tab,
		queryfunc: q,
		asked:     make(map[enode.ID]bool),
		seen:      make(map[enode.ID]bool),
		result:    nodesByDistance{target: target},
		replyCh:   make(chan []*node, alpha),
		cancelCh:  ctx.Done(),
		queries:   -1,
	}
	// Don't query further if we hit ourself.
	// Unlikely to happen often in practice.
	it.asked[tab.self().ID()] = true
	return it
}

// run runs the lookup to completion and returns the closest nodes found.
func (it *lookup) run() []*enode.Node {
	for it.advance() {
	}
	return unwrapNodes(it.result.entries)
}

// advance advances the lookup until any new nodes have been found.
// It returns false when the lookup has ended.
func (it *lookup) advance() bool {
	for it.startQueries() {
		select {
		case nodes := <-it.replyCh:
			it.replyBuffer = it.replyBuffer[:0]
			for _, n := range nodes {
				if n != nil && !it.seen[n.ID()] {
					it.seen[n.ID()] = true
					it.result.push(n, bucketSize)
					it.replyBuffer = append(it.replyBuffer, n)
				}
			}
			it.queries--
			if len(it.replyBuffer) > 0 {
				return true
			}
		case <-it.cancelCh:
			it.shutdown()
		}
	}
	return false
}

func (it *lookup) shutdown() {
	for it.queries > 0 {
		<-it.replyCh
		it.queries--
	}
	it.queryfunc = nil
	it.replyBuffer = nil
}

func (it *lookup) startQueries() bool {
	if it.queryfunc == nil {
		return false
	}

	// The first query returns nodes from the local table.
	if it.queries == -1 {
		it.tab.mutex.Lock()
		closest := it.tab.closest(it.result.target, bucketSize, false)
		it.tab.mutex.Unlock()
		it.queries = 1
		it.replyCh <- closest.entries
		return true
	}

	// Ask the closest nodes that we haven't asked yet.
	for i := 0; i < len(it.result.entries) && it.queries < alpha; i++ {
		n := it.result.entries[i]
		if !it.asked[n.ID()] {
			it.asked[n.ID()] = true
			it.queries++
			go it.query(n, it.replyCh)
		}
	}
	// The lookup ends when no more nodes can be asked.
	return it.queries > 0
}

func (it *lookup) query(n *node, reply chan<- []*node) {
	fails := it.tab.db.FindFails(n.ID(), n.IP())
	r, err := it.queryfunc(n)
	if err == errClosed {
		// Avoid recording failures on shutdown.
		reply <- nil
		return
	} else if len(r) == 0 {
		fails++
		it.tab.db.UpdateFindFails(n.ID(), n.IP(), fails)
		it.tab.log.Trace("Findnode failed", "id", n.ID(), "failcount", fails, "err", err)
		if fails >= maxFindnodeFailures {
			it.tab.log.Trace("Too many findnode failures, dropping", "id", n.ID(), "failcount", fails)
			it.tab.delete(n)
		}
	} else if fails > 0 {
		// Reset failure counter because it counts _consecutive_ failures.
		it.tab.db.UpdateFindFails(n.ID(), n.IP(), 0)
	}

	// Grab as many nodes as possible. Some of them might not be alive anymore, but we'll
	// just remove those again during revalidation.
	for _, n := range r {
		it.tab.addSeenNode(n)
	}
	reply <- r
}

// lookupIterator performs lookup operations and iterates over all seen nodes.
// When a lookup finishes, a new one is created through nextLookup.
type lookupIterator struct {
	buffer     []*node
	nextLookup lookupFunc
	ctx        context.Context
	cancel     func()
	lookup     *lookup
}

type lookupFunc func(ctx context.Context) *lookup

func newLookupIterator(ctx context.Context, next lookupFunc) *lookupIterator {
	ctx, cancel := context.WithCancel(ctx)
	return &lookupIterator{ctx: ctx, cancel: cancel, nextLookup: next}
}

// Node returns the current node.
func (it *lookupIterator) Node() *enode.Node {
	if len(it.buffer) == 0 {
		return nil
	}
	return unwrapNode(it.buffer[0])
}

// Next moves to the next node.
func (it *lookupIterator) Next() bool {
	// Consume next node in buffer.
	if len(it.buffer) > 0 {
		it.buffer = it.buffer[1:]
	}
	// Advance the lookup to refill the buffer.
	for len(it.buffer) == 0 {
		if it.ctx.Err() != nil {
			it.lookup = nil
			it.buffer = nil
			return false
		}
		if it.lookup == nil {
			it.lookup = it.nextLookup(it.ctx)
			continue
		}
		if !it.lookup.advance() {
			it.lookup = nil
			continue
		}
		it.buffer = it.lookup.replyBuffer
	}
	return true
}

// Close ends the iterator.
func (it *lookupIterator) Close() {
	it.cancel()
}
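The "RandomNodes iterator" named in the commit message is built on lookupIterator. A minimal sketch (not shown in this hunk) of how a transport can expose it; the randomNodesSketch helper and the newRandomLookup argument are assumptions about the surrounding discovery code, not definitions from this file:

// randomNodesSketch wraps repeated lookups as an enode.Iterator. ctx bounds the
// iterator's lifetime; newRandomLookup is assumed to start a lookup toward a
// freshly generated random target.
func randomNodesSketch(ctx context.Context, newRandomLookup lookupFunc) enode.Iterator {
	return newLookupIterator(ctx, newRandomLookup)
}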
@@ -0,0 +1,286 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package enode

import (
	"sync"
	"time"
)

// Iterator represents a sequence of nodes. The Next method moves to the next node in the
// sequence. It returns false when the sequence has ended or the iterator is closed. Close
// may be called concurrently with Next and Node, and interrupts Next if it is blocked.
type Iterator interface {
	Next() bool  // moves to next node
	Node() *Node // returns current node
	Close()      // ends the iterator
}

// ReadNodes reads at most n nodes from the given iterator. The return value contains no
// duplicates and no nil values. To prevent looping indefinitely for small repeating node
// sequences, this function calls Next at most n times.
func ReadNodes(it Iterator, n int) []*Node {
	seen := make(map[ID]*Node, n)
	for i := 0; i < n && it.Next(); i++ {
		// Remove duplicates, keeping the node with higher seq.
		node := it.Node()
		prevNode, ok := seen[node.ID()]
		if ok && prevNode.Seq() > node.Seq() {
			continue
		}
		seen[node.ID()] = node
	}
	result := make([]*Node, 0, len(seen))
	for _, node := range seen {
		result = append(result, node)
	}
	return result
}

// IterNodes makes an iterator which runs through the given nodes once.
func IterNodes(nodes []*Node) Iterator {
	return &sliceIter{nodes: nodes, index: -1}
}

// CycleNodes makes an iterator which cycles through the given nodes indefinitely.
func CycleNodes(nodes []*Node) Iterator {
	return &sliceIter{nodes: nodes, index: -1, cycle: true}
}

type sliceIter struct {
	mu    sync.Mutex
	nodes []*Node
	index int
	cycle bool
}

func (it *sliceIter) Next() bool {
	it.mu.Lock()
	defer it.mu.Unlock()

	if len(it.nodes) == 0 {
		return false
	}
	it.index++
	if it.index == len(it.nodes) {
		if it.cycle {
			it.index = 0
		} else {
			it.nodes = nil
			return false
		}
	}
	return true
}

func (it *sliceIter) Node() *Node {
	if len(it.nodes) == 0 {
		return nil
	}
	return it.nodes[it.index]
}

func (it *sliceIter) Close() {
	it.mu.Lock()
	defer it.mu.Unlock()

	it.nodes = nil
}

// Filter wraps an iterator such that Next only returns nodes for which
// the 'check' function returns true.
func Filter(it Iterator, check func(*Node) bool) Iterator {
	return &filterIter{it, check}
}

type filterIter struct {
	Iterator
	check func(*Node) bool
}

func (f *filterIter) Next() bool {
	for f.Iterator.Next() {
		if f.check(f.Node()) {
			return true
		}
	}
	return false
}

// FairMix aggregates multiple node iterators. The mixer itself is an iterator which ends
// only when Close is called. Source iterators added via AddSource are removed from the
// mix when they end.
//
// The distribution of nodes returned by Next is approximately fair, i.e. FairMix
// attempts to draw from all sources equally often. However, if a certain source is slow
// and doesn't return a node within the configured timeout, a node from any other source
// will be returned.
//
// It's safe to call AddSource and Close concurrently with Next.
type FairMix struct {
	wg      sync.WaitGroup
	fromAny chan *Node
	timeout time.Duration
	cur     *Node

	mu      sync.Mutex
	closed  chan struct{}
	sources []*mixSource
	last    int
}

type mixSource struct {
	it      Iterator
	next    chan *Node
	timeout time.Duration
}

// NewFairMix creates a mixer.
//
// The timeout specifies how long the mixer will wait for the next fairly-chosen source
// before giving up and taking a node from any other source. A good way to set the timeout
// is deciding how long you'd want to wait for a node on average. Passing a negative
// timeout makes the mixer completely fair.
func NewFairMix(timeout time.Duration) *FairMix {
	m := &FairMix{
		fromAny: make(chan *Node),
		closed:  make(chan struct{}),
		timeout: timeout,
	}
	return m
}

// AddSource adds a source of nodes.
func (m *FairMix) AddSource(it Iterator) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.closed == nil {
		return
	}
	m.wg.Add(1)
	source := &mixSource{it, make(chan *Node), m.timeout}
	m.sources = append(m.sources, source)
	go m.runSource(m.closed, source)
}

// Close shuts down the mixer and all current sources.
// Calling this is required to release resources associated with the mixer.
func (m *FairMix) Close() {
	m.mu.Lock()
	defer m.mu.Unlock()

	if m.closed == nil {
		return
	}
	for _, s := range m.sources {
		s.it.Close()
	}
	close(m.closed)
	m.wg.Wait()
	close(m.fromAny)
	m.sources = nil
	m.closed = nil
}

// Next returns a node from a random source.
func (m *FairMix) Next() bool {
	m.cur = nil

	var timeout <-chan time.Time
	if m.timeout >= 0 {
		timer := time.NewTimer(m.timeout)
		timeout = timer.C
		defer timer.Stop()
	}
	for {
		source := m.pickSource()
		if source == nil {
			return m.nextFromAny()
		}
		select {
		case n, ok := <-source.next:
			if ok {
				m.cur = n
				source.timeout = m.timeout
				return true
			}
			// This source has ended.
			m.deleteSource(source)
		case <-timeout:
			source.timeout /= 2
			return m.nextFromAny()
		}
	}
}

// Node returns the current node.
func (m *FairMix) Node() *Node {
	return m.cur
}

// nextFromAny is used when there are no sources or when the 'fair' choice
// doesn't turn up a node quickly enough.
func (m *FairMix) nextFromAny() bool {
	n, ok := <-m.fromAny
	if ok {
		m.cur = n
	}
	return ok
}

// pickSource chooses the next source to read from, cycling through them in order.
func (m *FairMix) pickSource() *mixSource {
	m.mu.Lock()
	defer m.mu.Unlock()

	if len(m.sources) == 0 {
		return nil
	}
	m.last = (m.last + 1) % len(m.sources)
	return m.sources[m.last]
}

// deleteSource deletes a source.
func (m *FairMix) deleteSource(s *mixSource) {
	m.mu.Lock()
	defer m.mu.Unlock()

	for i := range m.sources {
		if m.sources[i] == s {
			copy(m.sources[i:], m.sources[i+1:])
			m.sources[len(m.sources)-1] = nil
			m.sources = m.sources[:len(m.sources)-1]
			break
		}
	}
}

// runSource reads a single source in a loop.
func (m *FairMix) runSource(closed chan struct{}, s *mixSource) {
	defer m.wg.Done()
	defer close(s.next)
	for s.it.Next() {
		n := s.it.Node()
		select {
		case s.next <- n:
		case m.fromAny <- n:
		case <-closed:
			return
		}
	}
}
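A short usage sketch (not part of this file, same package assumed) tying the helpers together. The TCP-port predicate is just an example check on the Node accessors from this package, and the batch size is arbitrary:

// mixSketch mixes a one-shot list with a cycling fallback, keeps only nodes
// that advertise a TCP port, and reads a bounded batch of unique nodes.
func mixSketch(static, fallback []*Node) []*Node {
	mix := NewFairMix(50 * time.Millisecond)
	defer mix.Close()

	mix.AddSource(IterNodes(static))    // consumed once
	mix.AddSource(CycleNodes(fallback)) // repeats forever, so the mix never ends on its own

	withTCP := Filter(mix, func(n *Node) bool { return n.TCP() != 0 })
	return ReadNodes(withTCP, 16) // at most 16 Next calls, duplicates collapsed by seq
}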
@@ -0,0 +1,291 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package enode

import (
	"encoding/binary"
	"runtime"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enr"
)

func TestReadNodes(t *testing.T) {
	nodes := ReadNodes(new(genIter), 10)
	checkNodes(t, nodes, 10)
}

// This test checks that ReadNodes terminates when reading N nodes from an iterator
// which returns less than N nodes in an endless cycle.
func TestReadNodesCycle(t *testing.T) {
	iter := &callCountIter{
		Iterator: CycleNodes([]*Node{
			testNode(0, 0),
			testNode(1, 0),
			testNode(2, 0),
		}),
	}
	nodes := ReadNodes(iter, 10)
	checkNodes(t, nodes, 3)
	if iter.count != 10 {
		t.Fatalf("%d calls to Next, want %d", iter.count, 10)
	}
}

func TestFilterNodes(t *testing.T) {
	nodes := make([]*Node, 100)
	for i := range nodes {
		nodes[i] = testNode(uint64(i), uint64(i))
	}

	it := Filter(IterNodes(nodes), func(n *Node) bool {
		return n.Seq() >= 50
	})
	for i := 50; i < len(nodes); i++ {
		if !it.Next() {
			t.Fatal("Next returned false")
		}
		if it.Node() != nodes[i] {
			t.Fatalf("iterator returned wrong node %v\nwant %v", it.Node(), nodes[i])
		}
	}
	if it.Next() {
		t.Fatal("Next returned true after underlying iterator has ended")
	}
}

func checkNodes(t *testing.T, nodes []*Node, wantLen int) {
	if len(nodes) != wantLen {
		t.Errorf("slice has %d nodes, want %d", len(nodes), wantLen)
		return
	}
	seen := make(map[ID]bool)
	for i, e := range nodes {
		if e == nil {
			t.Errorf("nil node at index %d", i)
			return
		}
		if seen[e.ID()] {
			t.Errorf("slice has duplicate node %v", e.ID())
			return
		}
		seen[e.ID()] = true
	}
}

// This test checks fairness of FairMix in the happy case where all sources return nodes
// within the context's deadline.
func TestFairMix(t *testing.T) {
	for i := 0; i < 500; i++ {
		testMixerFairness(t)
	}
}

func testMixerFairness(t *testing.T) {
	mix := NewFairMix(1 * time.Second)
	mix.AddSource(&genIter{index: 1})
	mix.AddSource(&genIter{index: 2})
	mix.AddSource(&genIter{index: 3})
	defer mix.Close()

	nodes := ReadNodes(mix, 500)
	checkNodes(t, nodes, 500)

	// Verify that the nodes slice contains an approximately equal number of nodes
	// from each source.
	d := idPrefixDistribution(nodes)
	for _, count := range d {
		if approxEqual(count, len(nodes)/3, 30) {
			t.Fatalf("ID distribution is unfair: %v", d)
		}
	}
}

// This test checks that FairMix falls back to an alternative source when
// the 'fair' choice doesn't return a node within the timeout.
func TestFairMixNextFromAll(t *testing.T) {
	mix := NewFairMix(1 * time.Millisecond)
	mix.AddSource(&genIter{index: 1})
	mix.AddSource(CycleNodes(nil))
	defer mix.Close()

	nodes := ReadNodes(mix, 500)
	checkNodes(t, nodes, 500)

	d := idPrefixDistribution(nodes)
	if len(d) > 1 || d[1] != len(nodes) {
		t.Fatalf("wrong ID distribution: %v", d)
	}
}

// This test ensures FairMix works for Next with no sources.
func TestFairMixEmpty(t *testing.T) {
	var (
		mix   = NewFairMix(1 * time.Second)
		testN = testNode(1, 1)
		ch    = make(chan *Node)
	)
	defer mix.Close()

	go func() {
		mix.Next()
		ch <- mix.Node()
	}()

	mix.AddSource(CycleNodes([]*Node{testN}))
	if n := <-ch; n != testN {
		t.Errorf("got wrong node: %v", n)
	}
}

// This test checks closing a source while Next runs.
func TestFairMixRemoveSource(t *testing.T) {
	mix := NewFairMix(1 * time.Second)
	source := make(blockingIter)
	mix.AddSource(source)

	sig := make(chan *Node)
	go func() {
		<-sig
		mix.Next()
		sig <- mix.Node()
	}()

	sig <- nil
	runtime.Gosched()
	source.Close()

	wantNode := testNode(0, 0)
	mix.AddSource(CycleNodes([]*Node{wantNode}))
	n := <-sig

	if len(mix.sources) != 1 {
		t.Fatalf("have %d sources, want one", len(mix.sources))
	}
	if n != wantNode {
		t.Fatalf("mixer returned wrong node")
	}
}

type blockingIter chan struct{}

func (it blockingIter) Next() bool {
	<-it
	return false
}

func (it blockingIter) Node() *Node {
	return nil
}

func (it blockingIter) Close() {
	close(it)
}

func TestFairMixClose(t *testing.T) {
	for i := 0; i < 20 && !t.Failed(); i++ {
		testMixerClose(t)
	}
}

func testMixerClose(t *testing.T) {
	mix := NewFairMix(-1)
	mix.AddSource(CycleNodes(nil))
	mix.AddSource(CycleNodes(nil))

	done := make(chan struct{})
	go func() {
		defer close(done)
		if mix.Next() {
			t.Error("Next returned true")
		}
	}()
	// This call is supposed to make it more likely that Next is
	// actually executing by the time we call Close.
	runtime.Gosched()

	mix.Close()
	select {
	case <-done:
	case <-time.After(3 * time.Second):
		t.Fatal("Next didn't unblock on Close")
	}

	mix.Close() // shouldn't crash
}

func idPrefixDistribution(nodes []*Node) map[uint32]int {
	d := make(map[uint32]int)
	for _, node := range nodes {
		id := node.ID()
		d[binary.BigEndian.Uint32(id[:4])]++
	}
	return d
}

func approxEqual(x, y, ε int) bool {
	if y > x {
		x, y = y, x
	}
	return x-y > ε
}

// genIter creates fake nodes with numbered IDs based on 'index' and 'gen'
type genIter struct {
	node       *Node
	index, gen uint32
}

func (s *genIter) Next() bool {
	index := atomic.LoadUint32(&s.index)
	if index == ^uint32(0) {
		s.node = nil
		return false
	}
	s.node = testNode(uint64(index)<<32|uint64(s.gen), 0)
	s.gen++
	return true
}

func (s *genIter) Node() *Node {
	return s.node
}

func (s *genIter) Close() {
	s.index = ^uint32(0)
}

func testNode(id, seq uint64) *Node {
	var nodeID ID
	binary.BigEndian.PutUint64(nodeID[:], id)
	r := new(enr.Record)
	r.SetSeq(seq)
	return SignNull(r, nodeID)
}

// callCountIter counts calls to Next.
type callCountIter struct {
	Iterator
	count int
}

func (it *callCountIter) Next() bool {
	it.count++
	return it.Iterator.Next()
}