mirror of https://github.com/ethereum/go-ethereum
The most visible change is event-based dialing, which should be an improvement over the timer-based system that we have at the moment. The dialer gets a chance to compute new tasks whenever peers change or dials complete. This is better than checking peers on a timer because dials happen faster. The dialer can now make more precise decisions about whom to dial based on the peer set, and we can test those decisions without actually opening any sockets.

Peer management is also easier to test because the tests can inject connections at checkpoints (after the encryption handshake, after the protocol handshake).

Most of the handshake code is now part of the RLPx code. It could be exported or moved to its own package because it is no longer entangled with the Server logic.

pull/1014/head
parent 9f38ef5d97
commit 1440f9a37a
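For context, a toy, self-contained sketch of the scheduling shape described above: new tasks are computed whenever a running task completes, rather than on a timer. The names (task, sched, dialSim) and the demo scheduler are illustrative assumptions only; the real loop lives in Server.run, which is not part of the hunks shown below.

package main

import (
	"fmt"
	"time"
)

// task mirrors the task interface in p2p/dial.go: something the loop
// hands off to a goroutine and is told about when it finishes.
type task interface{ Do() }

// dialSim stands in for a dialTask: it "dials" by sleeping briefly.
type dialSim int

func (d dialSim) Do() { time.Sleep(10 * time.Millisecond) }

// sched stands in for dialstate: it wants maxDials dials in flight
// until it has started five in total.
type sched struct{ maxDials, started int }

func (s *sched) newTasks(running int) (ts []task) {
	for running+len(ts) < s.maxDials && s.started < 5 {
		ts = append(ts, dialSim(s.started))
		s.started++
	}
	return ts
}

func main() {
	var (
		s        = &sched{maxDials: 2}
		taskdone = make(chan task)
		running  = 0
	)
	// The event-driven core: the scheduler is consulted again every
	// time a task completes, not on a fixed tick. Server.run reacts
	// to peer additions and removals the same way, via more cases in
	// its select loop.
	for {
		for _, t := range s.newTasks(running) {
			running++
			go func(t task) { t.Do(); taskdone <- t }(t)
		}
		if running == 0 {
			break // nothing left to do in this demo
		}
		t := <-taskdone
		running--
		fmt.Printf("done: %v\n", t)
	}
}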
p2p/dial.go
@@ -0,0 +1,276 @@
package p2p

import (
	"container/heap"
	"crypto/rand"
	"fmt"
	"net"
	"time"

	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

const (
	// This is the amount of time spent waiting in between
	// redialing a certain node.
	dialHistoryExpiration = 30 * time.Second

	// Discovery lookup tasks will wait for this long when
	// no results are returned. This can happen if the table
	// becomes empty (i.e. not often).
	emptyLookupDelay = 10 * time.Second
)

// dialstate schedules dials and discovery lookups.
// It gets a chance to compute new tasks on every iteration
// of the main loop in Server.run.
type dialstate struct {
	maxDynDials int
	ntab        discoverTable

	lookupRunning bool
	bootstrapped  bool

	dialing     map[discover.NodeID]connFlag
	lookupBuf   []*discover.Node // current discovery lookup results
	randomNodes []*discover.Node // filled from Table
	static      map[discover.NodeID]*discover.Node
	hist        *dialHistory
}

type discoverTable interface {
	Self() *discover.Node
	Close()
	Bootstrap([]*discover.Node)
	Lookup(target discover.NodeID) []*discover.Node
	ReadRandomNodes([]*discover.Node) int
}

// the dial history remembers recent dials.
type dialHistory []pastDial

// pastDial is an entry in the dial history.
type pastDial struct {
	id  discover.NodeID
	exp time.Time
}

type task interface {
	Do(*Server)
}

// A dialTask is generated for each node that is dialed.
type dialTask struct {
	flags connFlag
	dest  *discover.Node
}

// discoverTask runs discovery table operations.
// Only one discoverTask is active at any time.
//
// If bootstrap is true, the task runs Table.Bootstrap,
// otherwise it performs a random lookup and leaves the
// results in the task.
type discoverTask struct {
	bootstrap bool
	results   []*discover.Node
}

// A waitExpireTask is generated if there are no other tasks
// to keep the loop in Server.run ticking.
type waitExpireTask struct {
	time.Duration
}

func newDialState(static []*discover.Node, ntab discoverTable, maxdyn int) *dialstate {
	s := &dialstate{
		maxDynDials: maxdyn,
		ntab:        ntab,
		static:      make(map[discover.NodeID]*discover.Node),
		dialing:     make(map[discover.NodeID]connFlag),
		randomNodes: make([]*discover.Node, maxdyn/2),
		hist:        new(dialHistory),
	}
	for _, n := range static {
		s.static[n.ID] = n
	}
	return s
}

func (s *dialstate) addStatic(n *discover.Node) {
	s.static[n.ID] = n
}

func (s *dialstate) newTasks(nRunning int, peers map[discover.NodeID]*Peer, now time.Time) []task {
	var newtasks []task
	addDial := func(flag connFlag, n *discover.Node) bool {
		_, dialing := s.dialing[n.ID]
		if dialing || peers[n.ID] != nil || s.hist.contains(n.ID) {
			return false
		}
		s.dialing[n.ID] = flag
		newtasks = append(newtasks, &dialTask{flags: flag, dest: n})
		return true
	}

	// Compute number of dynamic dials necessary at this point.
	needDynDials := s.maxDynDials
	for _, p := range peers {
		if p.rw.is(dynDialedConn) {
			needDynDials--
		}
	}
	for _, flag := range s.dialing {
		if flag&dynDialedConn != 0 {
			needDynDials--
		}
	}

	// Expire the dial history on every invocation.
	s.hist.expire(now)

	// Create dials for static nodes if they are not connected.
	for _, n := range s.static {
		addDial(staticDialedConn, n)
	}

	// Use random nodes from the table for half of the necessary
	// dynamic dials.
	randomCandidates := needDynDials / 2
	if randomCandidates > 0 && s.bootstrapped {
		n := s.ntab.ReadRandomNodes(s.randomNodes)
		for i := 0; i < randomCandidates && i < n; i++ {
			if addDial(dynDialedConn, s.randomNodes[i]) {
				needDynDials--
			}
		}
	}
	// Create dynamic dials from random lookup results, removing tried
	// items from the result buffer.
	i := 0
	for ; i < len(s.lookupBuf) && needDynDials > 0; i++ {
		if addDial(dynDialedConn, s.lookupBuf[i]) {
			needDynDials--
		}
	}
	s.lookupBuf = s.lookupBuf[:copy(s.lookupBuf, s.lookupBuf[i:])]
	// Launch a discovery lookup if more candidates are needed. The
	// first discoverTask bootstraps the table and won't return any
	// results.
	if len(s.lookupBuf) < needDynDials && !s.lookupRunning {
		s.lookupRunning = true
		newtasks = append(newtasks, &discoverTask{bootstrap: !s.bootstrapped})
	}

	// Launch a timer to wait for the next node to expire if all
	// candidates have been tried and no task is currently active.
	// This should prevent cases where the dialer logic is not ticked
	// because there are no pending events.
	if nRunning == 0 && len(newtasks) == 0 && s.hist.Len() > 0 {
		t := &waitExpireTask{s.hist.min().exp.Sub(now)}
		newtasks = append(newtasks, t)
	}
	return newtasks
}

func (s *dialstate) taskDone(t task, now time.Time) {
	switch t := t.(type) {
	case *dialTask:
		s.hist.add(t.dest.ID, now.Add(dialHistoryExpiration))
		delete(s.dialing, t.dest.ID)
	case *discoverTask:
		if t.bootstrap {
			s.bootstrapped = true
		}
		s.lookupRunning = false
		s.lookupBuf = append(s.lookupBuf, t.results...)
	}
}

func (t *dialTask) Do(srv *Server) {
	addr := &net.TCPAddr{IP: t.dest.IP, Port: int(t.dest.TCP)}
	glog.V(logger.Debug).Infof("dialing %v\n", t.dest)
	fd, err := srv.Dialer.Dial("tcp", addr.String())
	if err != nil {
		glog.V(logger.Detail).Infof("dial error: %v", err)
		return
	}
	srv.setupConn(fd, t.flags, t.dest)
}
func (t *dialTask) String() string {
	return fmt.Sprintf("%v %x %v:%d", t.flags, t.dest.ID[:8], t.dest.IP, t.dest.TCP)
}

func (t *discoverTask) Do(srv *Server) {
	if t.bootstrap {
		srv.ntab.Bootstrap(srv.BootstrapNodes)
	} else {
		var target discover.NodeID
		rand.Read(target[:])
		t.results = srv.ntab.Lookup(target)
		// newTasks generates a lookup task whenever dynamic dials are
		// necessary. Lookups need to take some time, otherwise the
		// event loop spins too fast. An empty result can only be
		// returned if the table is empty.
		if len(t.results) == 0 {
			time.Sleep(emptyLookupDelay)
		}
	}
}

func (t *discoverTask) String() (s string) {
	if t.bootstrap {
		s = "discovery bootstrap"
	} else {
		s = "discovery lookup"
	}
	if len(t.results) > 0 {
		s += fmt.Sprintf(" (%d results)", len(t.results))
	}
	return s
}

func (t waitExpireTask) Do(*Server) {
	time.Sleep(t.Duration)
}
func (t waitExpireTask) String() string {
	return fmt.Sprintf("wait for dial hist expire (%v)", t.Duration)
}

// Use only these methods to access or modify dialHistory.
func (h dialHistory) min() pastDial {
	return h[0]
}
func (h *dialHistory) add(id discover.NodeID, exp time.Time) {
	heap.Push(h, pastDial{id, exp})
}
func (h dialHistory) contains(id discover.NodeID) bool {
	for _, v := range h {
		if v.id == id {
			return true
		}
	}
	return false
}
func (h *dialHistory) expire(now time.Time) {
	for h.Len() > 0 && h.min().exp.Before(now) {
		heap.Pop(h)
	}
}

// heap.Interface boilerplate
func (h dialHistory) Len() int           { return len(h) }
func (h dialHistory) Less(i, j int) bool { return h[i].exp.Before(h[j].exp) }
func (h dialHistory) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
func (h *dialHistory) Push(x interface{}) {
	*h = append(*h, x.(pastDial))
}
func (h *dialHistory) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[0 : n-1]
	return x
}
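dialHistory above is a min-heap ordered by expiry time: add pushes a deadline, contains suppresses redials, and expire pops entries whose deadline has passed. A hypothetical test, not part of this commit (it borrows the uintID helper from dial_test.go below), illustrating those semantics:

package p2p

import (
	"testing"
	"time"
)

// TestDialHistoryExpiry is a sketch: an entry blocks redials via
// contains until expire removes it once its deadline has passed.
func TestDialHistoryExpiry(t *testing.T) {
	var (
		h   = new(dialHistory)
		id  = uintID(42)
		now = time.Now()
	)
	h.add(id, now.Add(dialHistoryExpiration))
	if !h.contains(id) {
		t.Fatal("fresh entry should be present")
	}
	h.expire(now.Add(dialHistoryExpiration + time.Second))
	if h.contains(id) {
		t.Fatal("entry should be gone after its deadline")
	}
}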
p2p/dial_test.go
@@ -0,0 +1,482 @@
package p2p

import (
	"encoding/binary"
	"reflect"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

func init() {
	spew.Config.Indent = "\t"
}

type dialtest struct {
	init   *dialstate // state before and after the test.
	rounds []round
}

type round struct {
	peers []*Peer // current peer set
	done  []task  // tasks that got done this round
	new   []task  // the result must match this one
}

func runDialTest(t *testing.T, test dialtest) {
	var (
		vtime   time.Time
		running int
	)
	pm := func(ps []*Peer) map[discover.NodeID]*Peer {
		m := make(map[discover.NodeID]*Peer)
		for _, p := range ps {
			m[p.rw.id] = p
		}
		return m
	}
	for i, round := range test.rounds {
		for _, task := range round.done {
			running--
			if running < 0 {
				panic("running task counter underflow")
			}
			test.init.taskDone(task, vtime)
		}

		new := test.init.newTasks(running, pm(round.peers), vtime)
		if !sametasks(new, round.new) {
			t.Errorf("round %d: new tasks mismatch:\ngot %v\nwant %v\nstate: %v\nrunning: %v\n",
				i, spew.Sdump(new), spew.Sdump(round.new), spew.Sdump(test.init), spew.Sdump(running))
		}

		// Time advances by 16 seconds on every round.
		vtime = vtime.Add(16 * time.Second)
		running += len(new)
	}
}

type fakeTable []*discover.Node

func (t fakeTable) Self() *discover.Node       { return new(discover.Node) }
func (t fakeTable) Close()                     {}
func (t fakeTable) Bootstrap([]*discover.Node) {}
func (t fakeTable) Lookup(target discover.NodeID) []*discover.Node {
	return nil
}
func (t fakeTable) ReadRandomNodes(buf []*discover.Node) int {
	return copy(buf, t)
}

// This test checks that dynamic dials are launched from discovery results.
func TestDialStateDynDial(t *testing.T) {
	runDialTest(t, dialtest{
		init: newDialState(nil, fakeTable{}, 5),
		rounds: []round{
			// A discovery query is launched.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				new: []task{&discoverTask{bootstrap: true}},
			},
			// Dynamic dials are launched when it completes.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				done: []task{
					&discoverTask{bootstrap: true, results: []*discover.Node{
						{ID: uintID(2)}, // this one is already connected and not dialed.
						{ID: uintID(3)},
						{ID: uintID(4)},
						{ID: uintID(5)},
						{ID: uintID(6)}, // these are not tried because max dyn dials is 5
						{ID: uintID(7)}, // ...
					}},
				},
				new: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(3)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(4)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(5)}},
				},
			},
			// Some of the dials complete but no new ones are launched yet because
			// the sum of active dial count and dynamic peer count is == maxDynDials.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(3)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(4)}},
				},
			},
			// No new dial tasks are launched in this round because
			// maxDynDials has been reached.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(5)}},
				},
				new: []task{
					&waitExpireTask{Duration: 14 * time.Second},
				},
			},
			// In this round, the peer with id 2 drops off. The query
			// results from the last discovery lookup are reused.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(3)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(4)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
				},
				new: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(6)}},
				},
			},
			// More peers (3,4) drop off and the dial for ID 6 completes.
			// The last query result from the discovery lookup is reused
			// and a new one is spawned because more candidates are needed.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(6)}},
				},
				new: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(7)}},
					&discoverTask{},
				},
			},
			// Peer 7 is connected, but there still aren't enough dynamic peers
			// (4 out of 5). However, a discovery is already running, so ensure
			// no new one is started.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(7)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(7)}},
				},
			},
			// Finish the running node discovery with an empty set. A new lookup
			// should be immediately requested.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(0)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(5)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(7)}},
				},
				done: []task{
					&discoverTask{},
				},
				new: []task{
					&discoverTask{},
				},
			},
		},
	})
}

func TestDialStateDynDialFromTable(t *testing.T) {
	// This table always returns the same random nodes
	// in the order given below.
	table := fakeTable{
		{ID: uintID(1)},
		{ID: uintID(2)},
		{ID: uintID(3)},
		{ID: uintID(4)},
		{ID: uintID(5)},
		{ID: uintID(6)},
		{ID: uintID(7)},
		{ID: uintID(8)},
	}

	runDialTest(t, dialtest{
		init: newDialState(nil, table, 10),
		rounds: []round{
			// Discovery bootstrap is launched.
			{
				new: []task{&discoverTask{bootstrap: true}},
			},
			// 5 out of 8 of the nodes returned by ReadRandomNodes are dialed.
			{
				done: []task{
					&discoverTask{bootstrap: true},
				},
				new: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(1)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(2)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(3)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(4)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(5)}},
					&discoverTask{bootstrap: false},
				},
			},
			// Dialing nodes 1,2 succeeds. Dials from the lookup are launched.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(1)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(2)}},
					&discoverTask{results: []*discover.Node{
						{ID: uintID(10)},
						{ID: uintID(11)},
						{ID: uintID(12)},
					}},
				},
				new: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(10)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(11)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(12)}},
					&discoverTask{bootstrap: false},
				},
			},
			// Dialing nodes 3,4,5 fails. The dials from the lookup succeed.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
				},
				done: []task{
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(3)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(4)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(5)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(10)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(11)}},
					&dialTask{dynDialedConn, &discover.Node{ID: uintID(12)}},
				},
			},
			// Waiting for expiry. No waitExpireTask is launched because the
			// discovery query is still running.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
				},
			},
			// Nodes 3,4 are not tried again because only the first two
			// returned random nodes (nodes 1,2) are tried and they're
			// already connected.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(10)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(11)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(12)}},
				},
			},
		},
	})
}

// This test checks that static dials are launched.
func TestDialStateStaticDial(t *testing.T) {
	wantStatic := []*discover.Node{
		{ID: uintID(1)},
		{ID: uintID(2)},
		{ID: uintID(3)},
		{ID: uintID(4)},
		{ID: uintID(5)},
	}

	runDialTest(t, dialtest{
		init: newDialState(wantStatic, fakeTable{}, 0),
		rounds: []round{
			// Static dials are launched for the nodes that
			// aren't yet connected.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				new: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(3)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(4)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(5)}},
				},
			},
			// No new tasks are launched in this round because all static
			// nodes are either connected or still being dialed.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
				},
				done: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(3)}},
				},
			},
			// No new dial tasks are launched because all static
			// nodes are now connected.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(4)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
				},
				done: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(4)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(5)}},
				},
				new: []task{
					&waitExpireTask{Duration: 14 * time.Second},
				},
			},
			// Wait a round for the dial history to expire; no new tasks should spawn.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(4)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
				},
			},
			// If a static node is dropped, it should be immediately redialed,
			// irrespective of whether it was originally static or dynamic.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(3)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(5)}},
				},
				new: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(2)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(4)}},
				},
			},
		},
	})
}

// This test checks that past dials are not retried for some time.
func TestDialStateCache(t *testing.T) {
	wantStatic := []*discover.Node{
		{ID: uintID(1)},
		{ID: uintID(2)},
		{ID: uintID(3)},
	}

	runDialTest(t, dialtest{
		init: newDialState(wantStatic, fakeTable{}, 0),
		rounds: []round{
			// Static dials are launched for the nodes that
			// aren't yet connected.
			{
				peers: nil,
				new: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(1)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(2)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(3)}},
				},
			},
			// No new tasks are launched in this round because all static
			// nodes are either connected or still being dialed.
			{
				peers: []*Peer{
					{rw: &conn{flags: staticDialedConn, id: uintID(1)}},
					{rw: &conn{flags: staticDialedConn, id: uintID(2)}},
				},
				done: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(1)}},
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(2)}},
				},
			},
			// A salvage task is launched to wait for node 3's history
			// entry to expire.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				done: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(3)}},
				},
				new: []task{
					&waitExpireTask{Duration: 14 * time.Second},
				},
			},
			// Still waiting for node 3's entry to expire in the cache.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
			},
			// The cache entry for node 3 has expired and is retried.
			{
				peers: []*Peer{
					{rw: &conn{flags: dynDialedConn, id: uintID(1)}},
					{rw: &conn{flags: dynDialedConn, id: uintID(2)}},
				},
				new: []task{
					&dialTask{staticDialedConn, &discover.Node{ID: uintID(3)}},
				},
			},
		},
	})
}

// compares task lists but doesn't care about the order.
func sametasks(a, b []task) bool {
	if len(a) != len(b) {
		return false
	}
next:
	for _, ta := range a {
		for _, tb := range b {
			if reflect.DeepEqual(ta, tb) {
				continue next
			}
		}
		return false
	}
	return true
}

func uintID(i uint32) discover.NodeID {
	var id discover.NodeID
	binary.BigEndian.PutUint32(id[:], i)
	return id
}
p2p/handshake.go (deleted)
@@ -1,448 +0,0 @@
package p2p

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"errors"
	"fmt"
	"hash"
	"io"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
	"github.com/ethereum/go-ethereum/crypto/secp256k1"
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	sskLen = 16 // ecies.MaxSharedKeyLength(pubKey) / 2
	sigLen = 65 // elliptic S256
	pubLen = 64 // 512 bit pubkey in uncompressed representation without format byte
	shaLen = 32 // hash length (for nonce etc)

	authMsgLen  = sigLen + shaLen + pubLen + shaLen + 1
	authRespLen = pubLen + shaLen + 1

	eciesBytes     = 65 + 16 + 32
	encAuthMsgLen  = authMsgLen + eciesBytes  // size of the final ECIES payload sent as initiator's handshake
	encAuthRespLen = authRespLen + eciesBytes // size of the final ECIES payload sent as receiver's handshake
)

// conn represents a remote connection after encryption handshake
// and protocol handshake have completed.
//
// The MsgReadWriter is usually layered as follows:
//
//	netWrapper (I/O timeouts, thread-safe ReadMsg, WriteMsg)
//	rlpxFrameRW (message encoding, encryption, authentication)
//	bufio.ReadWriter (buffering)
//	net.Conn (network I/O)
//
type conn struct {
	MsgReadWriter
	*protoHandshake
}

// secrets represents the connection secrets
// which are negotiated during the encryption handshake.
type secrets struct {
	RemoteID              discover.NodeID
	AES, MAC              []byte
	EgressMAC, IngressMAC hash.Hash
	Token                 []byte
}

// protoHandshake is the RLP structure of the protocol handshake.
type protoHandshake struct {
	Version    uint64
	Name       string
	Caps       []Cap
	ListenPort uint64
	ID         discover.NodeID
}

// setupConn starts a protocol session on the given connection. It
// runs the encryption handshake and the protocol handshake. If dial
// is non-nil, the local node is the initiator of the connection. If
// keepconn returns false, the connection will be disconnected with
// DiscTooManyPeers after the key exchange.
func setupConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) {
	if dial == nil {
		return setupInboundConn(fd, prv, our, keepconn)
	} else {
		return setupOutboundConn(fd, prv, our, dial, keepconn)
	}
}

func setupInboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, keepconn func(discover.NodeID) bool) (*conn, error) {
	secrets, err := receiverEncHandshake(fd, prv, nil)
	if err != nil {
		return nil, fmt.Errorf("encryption handshake failed: %v", err)
	}
	rw := newRlpxFrameRW(fd, secrets)
	if !keepconn(secrets.RemoteID) {
		SendItems(rw, discMsg, DiscTooManyPeers)
		return nil, errors.New("we have too many peers")
	}
	// Run the protocol handshake using authenticated messages.
	rhs, err := readProtocolHandshake(rw, secrets.RemoteID, our)
	if err != nil {
		return nil, err
	}
	if err := Send(rw, handshakeMsg, our); err != nil {
		return nil, fmt.Errorf("protocol handshake write error: %v", err)
	}
	return &conn{rw, rhs}, nil
}

func setupOutboundConn(fd net.Conn, prv *ecdsa.PrivateKey, our *protoHandshake, dial *discover.Node, keepconn func(discover.NodeID) bool) (*conn, error) {
	secrets, err := initiatorEncHandshake(fd, prv, dial.ID, nil)
	if err != nil {
		return nil, fmt.Errorf("encryption handshake failed: %v", err)
	}
	rw := newRlpxFrameRW(fd, secrets)
	if !keepconn(secrets.RemoteID) {
		SendItems(rw, discMsg, DiscTooManyPeers)
		return nil, errors.New("we have too many peers")
	}
	// Run the protocol handshake using authenticated messages.
	//
	// Note that even though writing the handshake is first, we prefer
	// returning the handshake read error. If the remote side
	// disconnects us early with a valid reason, we should return it
	// as the error so it can be tracked elsewhere.
	werr := make(chan error, 1)
	go func() { werr <- Send(rw, handshakeMsg, our) }()
	rhs, err := readProtocolHandshake(rw, secrets.RemoteID, our)
	if err != nil {
		return nil, err
	}
	if err := <-werr; err != nil {
		return nil, fmt.Errorf("protocol handshake write error: %v", err)
	}
	if rhs.ID != dial.ID {
		return nil, errors.New("dialed node id mismatch")
	}
	return &conn{rw, rhs}, nil
}

// encHandshake contains the state of the encryption handshake.
type encHandshake struct {
	initiator bool
	remoteID  discover.NodeID

	remotePub            *ecies.PublicKey  // remote-pubk
	initNonce, respNonce []byte            // nonce
	randomPrivKey        *ecies.PrivateKey // ecdhe-random
	remoteRandomPub      *ecies.PublicKey  // ecdhe-random-pubk
}

// secrets is called after the handshake is completed.
// It extracts the connection secrets from the handshake values.
func (h *encHandshake) secrets(auth, authResp []byte) (secrets, error) {
	ecdheSecret, err := h.randomPrivKey.GenerateShared(h.remoteRandomPub, sskLen, sskLen)
	if err != nil {
		return secrets{}, err
	}

	// derive base secrets from ephemeral key agreement
	sharedSecret := crypto.Sha3(ecdheSecret, crypto.Sha3(h.respNonce, h.initNonce))
	aesSecret := crypto.Sha3(ecdheSecret, sharedSecret)
	s := secrets{
		RemoteID: h.remoteID,
		AES:      aesSecret,
		MAC:      crypto.Sha3(ecdheSecret, aesSecret),
		Token:    crypto.Sha3(sharedSecret),
	}

	// setup sha3 instances for the MACs
	mac1 := sha3.NewKeccak256()
	mac1.Write(xor(s.MAC, h.respNonce))
	mac1.Write(auth)
	mac2 := sha3.NewKeccak256()
	mac2.Write(xor(s.MAC, h.initNonce))
	mac2.Write(authResp)
	if h.initiator {
		s.EgressMAC, s.IngressMAC = mac1, mac2
	} else {
		s.EgressMAC, s.IngressMAC = mac2, mac1
	}

	return s, nil
}

func (h *encHandshake) ecdhShared(prv *ecdsa.PrivateKey) ([]byte, error) {
	return ecies.ImportECDSA(prv).GenerateShared(h.remotePub, sskLen, sskLen)
}

// initiatorEncHandshake negotiates a session token on conn.
// It should be called on the dialing side of the connection.
//
// prv is the local client's private key.
// token is the token from a previous session with this node.
func initiatorEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, remoteID discover.NodeID, token []byte) (s secrets, err error) {
	h, err := newInitiatorHandshake(remoteID)
	if err != nil {
		return s, err
	}
	auth, err := h.authMsg(prv, token)
	if err != nil {
		return s, err
	}
	if _, err = conn.Write(auth); err != nil {
		return s, err
	}

	response := make([]byte, encAuthRespLen)
	if _, err = io.ReadFull(conn, response); err != nil {
		return s, err
	}
	if err := h.decodeAuthResp(response, prv); err != nil {
		return s, err
	}
	return h.secrets(auth, response)
}

func newInitiatorHandshake(remoteID discover.NodeID) (*encHandshake, error) {
	// generate random initiator nonce
	n := make([]byte, shaLen)
	if _, err := rand.Read(n); err != nil {
		return nil, err
	}
	// generate random keypair to use for signing
	randpriv, err := ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
	if err != nil {
		return nil, err
	}
	rpub, err := remoteID.Pubkey()
	if err != nil {
		return nil, fmt.Errorf("bad remoteID: %v", err)
	}
	h := &encHandshake{
		initiator:     true,
		remoteID:      remoteID,
		remotePub:     ecies.ImportECDSAPublic(rpub),
		initNonce:     n,
		randomPrivKey: randpriv,
	}
	return h, nil
}

// authMsg creates an encrypted initiator handshake message.
func (h *encHandshake) authMsg(prv *ecdsa.PrivateKey, token []byte) ([]byte, error) {
	var tokenFlag byte
	if token == nil {
		// no session token found means we need to generate shared secret.
		// ecies shared secret is used as initial session token for new peers
		// generate shared key from prv and remote pubkey
		var err error
		if token, err = h.ecdhShared(prv); err != nil {
			return nil, err
		}
	} else {
		// for known peers, we use stored token from the previous session
		tokenFlag = 0x01
	}

	// sign known message:
	//  ecdh-shared-secret^nonce for new peers
	//  token^nonce for old peers
	signed := xor(token, h.initNonce)
	signature, err := crypto.Sign(signed, h.randomPrivKey.ExportECDSA())
	if err != nil {
		return nil, err
	}

	// encode auth message
	// signature || sha3(ecdhe-random-pubk) || pubk || nonce || token-flag
	msg := make([]byte, authMsgLen)
	n := copy(msg, signature)
	n += copy(msg[n:], crypto.Sha3(exportPubkey(&h.randomPrivKey.PublicKey)))
	n += copy(msg[n:], crypto.FromECDSAPub(&prv.PublicKey)[1:])
	n += copy(msg[n:], h.initNonce)
	msg[n] = tokenFlag

	// encrypt auth message using remote-pubk
	return ecies.Encrypt(rand.Reader, h.remotePub, msg, nil, nil)
}

// decodeAuthResp decodes an encrypted authentication response message.
func (h *encHandshake) decodeAuthResp(auth []byte, prv *ecdsa.PrivateKey) error {
	msg, err := crypto.Decrypt(prv, auth)
	if err != nil {
		return fmt.Errorf("could not decrypt auth response (%v)", err)
	}
	h.respNonce = msg[pubLen : pubLen+shaLen]
	h.remoteRandomPub, err = importPublicKey(msg[:pubLen])
	if err != nil {
		return err
	}
	// ignore token flag for now
	return nil
}

// receiverEncHandshake negotiates a session token on conn.
// It should be called on the listening side of the connection.
//
// prv is the local client's private key.
// token is the token from a previous session with this node.
func receiverEncHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, token []byte) (s secrets, err error) {
	// read remote auth sent by initiator.
	auth := make([]byte, encAuthMsgLen)
	if _, err := io.ReadFull(conn, auth); err != nil {
		return s, err
	}
	h, err := decodeAuthMsg(prv, token, auth)
	if err != nil {
		return s, err
	}

	// send auth response
	resp, err := h.authResp(prv, token)
	if err != nil {
		return s, err
	}
	if _, err = conn.Write(resp); err != nil {
		return s, err
	}

	return h.secrets(auth, resp)
}

func decodeAuthMsg(prv *ecdsa.PrivateKey, token []byte, auth []byte) (*encHandshake, error) {
	var err error
	h := new(encHandshake)
	// generate random keypair for session
	h.randomPrivKey, err = ecies.GenerateKey(rand.Reader, crypto.S256(), nil)
	if err != nil {
		return nil, err
	}
	// generate random nonce
	h.respNonce = make([]byte, shaLen)
	if _, err = rand.Read(h.respNonce); err != nil {
		return nil, err
	}

	msg, err := crypto.Decrypt(prv, auth)
	if err != nil {
		return nil, fmt.Errorf("could not decrypt auth message (%v)", err)
	}

	// decode message parameters
	// signature || sha3(ecdhe-random-pubk) || pubk || nonce || token-flag
	h.initNonce = msg[authMsgLen-shaLen-1 : authMsgLen-1]
	copy(h.remoteID[:], msg[sigLen+shaLen:sigLen+shaLen+pubLen])
	rpub, err := h.remoteID.Pubkey()
	if err != nil {
		return nil, fmt.Errorf("bad remoteID: %#v", err)
	}
	h.remotePub = ecies.ImportECDSAPublic(rpub)

	// recover remote random pubkey from signed message.
	if token == nil {
		// TODO: it is an error if the initiator has a token and we don't. check that.

		// no session token means we need to generate shared secret.
		// ecies shared secret is used as initial session token for new peers.
		// generate shared key from prv and remote pubkey.
		if token, err = h.ecdhShared(prv); err != nil {
			return nil, err
		}
	}
	signedMsg := xor(token, h.initNonce)
	remoteRandomPub, err := secp256k1.RecoverPubkey(signedMsg, msg[:sigLen])
	if err != nil {
		return nil, err
	}
	h.remoteRandomPub, _ = importPublicKey(remoteRandomPub)
	return h, nil
}

// authResp generates the encrypted authentication response message.
func (h *encHandshake) authResp(prv *ecdsa.PrivateKey, token []byte) ([]byte, error) {
	// responder auth message
	// E(remote-pubk, ecdhe-random-pubk || nonce || 0x0)
	resp := make([]byte, authRespLen)
	n := copy(resp, exportPubkey(&h.randomPrivKey.PublicKey))
	n += copy(resp[n:], h.respNonce)
	if token == nil {
		resp[n] = 0
	} else {
		resp[n] = 1
	}
	// encrypt using remote-pubk
	return ecies.Encrypt(rand.Reader, h.remotePub, resp, nil, nil)
}

// importPublicKey unmarshals 512 bit public keys.
func importPublicKey(pubKey []byte) (*ecies.PublicKey, error) {
	var pubKey65 []byte
	switch len(pubKey) {
	case 64:
		// add 'uncompressed key' flag
		pubKey65 = append([]byte{0x04}, pubKey...)
	case 65:
		pubKey65 = pubKey
	default:
		return nil, fmt.Errorf("invalid public key length %v (expect 64/65)", len(pubKey))
	}
	// TODO: fewer pointless conversions
	return ecies.ImportECDSAPublic(crypto.ToECDSAPub(pubKey65)), nil
}

func exportPubkey(pub *ecies.PublicKey) []byte {
	if pub == nil {
		panic("nil pubkey")
	}
	return elliptic.Marshal(pub.Curve, pub.X, pub.Y)[1:]
}

func xor(one, other []byte) (xor []byte) {
	xor = make([]byte, len(one))
	for i := 0; i < len(one); i++ {
		xor[i] = one[i] ^ other[i]
	}
	return xor
}

func readProtocolHandshake(rw MsgReadWriter, wantID discover.NodeID, our *protoHandshake) (*protoHandshake, error) {
	msg, err := rw.ReadMsg()
	if err != nil {
		return nil, err
	}
	if msg.Code == discMsg {
		// A disconnect before the protocol handshake is valid according
		// to the spec, and we send it ourselves if Server.addPeer fails.
		var reason [1]DiscReason
		rlp.Decode(msg.Payload, &reason)
		return nil, reason[0]
	}
	if msg.Code != handshakeMsg {
		return nil, fmt.Errorf("expected handshake, got %x", msg.Code)
	}
	if msg.Size > baseProtocolMaxMsgSize {
		return nil, fmt.Errorf("message too big (%d > %d)", msg.Size, baseProtocolMaxMsgSize)
	}
	var hs protoHandshake
	if err := msg.Decode(&hs); err != nil {
		return nil, err
	}
	// validate handshake info
	if hs.Version != our.Version {
		SendItems(rw, discMsg, DiscIncompatibleVersion)
		return nil, fmt.Errorf("required version %d, received %d\n", baseProtocolVersion, hs.Version)
	}
	if (hs.ID == discover.NodeID{}) {
		SendItems(rw, discMsg, DiscInvalidIdentity)
		return nil, errors.New("invalid public key in handshake")
	}
	if hs.ID != wantID {
		SendItems(rw, discMsg, DiscUnexpectedIdentity)
		return nil, errors.New("handshake node ID does not match encryption handshake")
	}
	return &hs, nil
}
p2p/handshake_test.go (deleted)
@@ -1,172 +0,0 @@
package p2p

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"net"
	"reflect"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

func TestSharedSecret(t *testing.T) {
	prv0, _ := crypto.GenerateKey() // = ecdsa.GenerateKey(crypto.S256(), rand.Reader)
	pub0 := &prv0.PublicKey
	prv1, _ := crypto.GenerateKey()
	pub1 := &prv1.PublicKey

	ss0, err := ecies.ImportECDSA(prv0).GenerateShared(ecies.ImportECDSAPublic(pub1), sskLen, sskLen)
	if err != nil {
		return
	}
	ss1, err := ecies.ImportECDSA(prv1).GenerateShared(ecies.ImportECDSAPublic(pub0), sskLen, sskLen)
	if err != nil {
		return
	}
	t.Logf("Secret:\n%v %x\n%v %x", len(ss0), ss0, len(ss0), ss1)
	if !bytes.Equal(ss0, ss1) {
		t.Errorf("dont match :(")
	}
}

func TestEncHandshake(t *testing.T) {
	for i := 0; i < 20; i++ {
		start := time.Now()
		if err := testEncHandshake(nil); err != nil {
			t.Fatalf("i=%d %v", i, err)
		}
		t.Logf("(without token) %d %v\n", i+1, time.Since(start))
	}

	for i := 0; i < 20; i++ {
		tok := make([]byte, shaLen)
		rand.Reader.Read(tok)
		start := time.Now()
		if err := testEncHandshake(tok); err != nil {
			t.Fatalf("i=%d %v", i, err)
		}
		t.Logf("(with token) %d %v\n", i+1, time.Since(start))
	}
}

func testEncHandshake(token []byte) error {
	type result struct {
		side string
		s    secrets
		err  error
	}
	var (
		prv0, _  = crypto.GenerateKey()
		prv1, _  = crypto.GenerateKey()
		rw0, rw1 = net.Pipe()
		output   = make(chan result)
	)

	go func() {
		r := result{side: "initiator"}
		defer func() { output <- r }()

		pub1s := discover.PubkeyID(&prv1.PublicKey)
		r.s, r.err = initiatorEncHandshake(rw0, prv0, pub1s, token)
		if r.err != nil {
			return
		}
		id1 := discover.PubkeyID(&prv1.PublicKey)
		if r.s.RemoteID != id1 {
			r.err = fmt.Errorf("remote ID mismatch: got %v, want: %v", r.s.RemoteID, id1)
		}
	}()
	go func() {
		r := result{side: "receiver"}
		defer func() { output <- r }()

		r.s, r.err = receiverEncHandshake(rw1, prv1, token)
		if r.err != nil {
			return
		}
		id0 := discover.PubkeyID(&prv0.PublicKey)
		if r.s.RemoteID != id0 {
			r.err = fmt.Errorf("remote ID mismatch: got %v, want: %v", r.s.RemoteID, id0)
		}
	}()

	// wait for results from both sides
	r1, r2 := <-output, <-output

	if r1.err != nil {
		return fmt.Errorf("%s side error: %v", r1.side, r1.err)
	}
	if r2.err != nil {
		return fmt.Errorf("%s side error: %v", r2.side, r2.err)
	}

	// don't compare remote node IDs
	r1.s.RemoteID, r2.s.RemoteID = discover.NodeID{}, discover.NodeID{}
	// flip MACs on one of them so they compare equal
	r1.s.EgressMAC, r1.s.IngressMAC = r1.s.IngressMAC, r1.s.EgressMAC
	if !reflect.DeepEqual(r1.s, r2.s) {
		return fmt.Errorf("secrets mismatch:\n t1: %#v\n t2: %#v", r1.s, r2.s)
	}
	return nil
}

func TestSetupConn(t *testing.T) {
	prv0, _ := crypto.GenerateKey()
	prv1, _ := crypto.GenerateKey()
	node0 := &discover.Node{
		ID:  discover.PubkeyID(&prv0.PublicKey),
		IP:  net.IP{1, 2, 3, 4},
		TCP: 33,
	}
	node1 := &discover.Node{
		ID:  discover.PubkeyID(&prv1.PublicKey),
		IP:  net.IP{5, 6, 7, 8},
		TCP: 44,
	}
	hs0 := &protoHandshake{
		Version: baseProtocolVersion,
		ID:      node0.ID,
		Caps:    []Cap{{"a", 0}, {"b", 2}},
	}
	hs1 := &protoHandshake{
		Version: baseProtocolVersion,
		ID:      node1.ID,
		Caps:    []Cap{{"c", 1}, {"d", 3}},
	}
	fd0, fd1 := net.Pipe()

	done := make(chan struct{})
	keepalways := func(discover.NodeID) bool { return true }
	go func() {
		defer close(done)
		conn0, err := setupConn(fd0, prv0, hs0, node1, keepalways)
		if err != nil {
			t.Errorf("outbound side error: %v", err)
			return
		}
		if conn0.ID != node1.ID {
			t.Errorf("outbound conn id mismatch: got %v, want %v", conn0.ID, node1.ID)
		}
		if !reflect.DeepEqual(conn0.Caps, hs1.Caps) {
			t.Errorf("outbound caps mismatch: got %v, want %v", conn0.Caps, hs1.Caps)
		}
	}()

	conn1, err := setupConn(fd1, prv1, hs1, nil, keepalways)
	if err != nil {
		t.Fatalf("inbound side error: %v", err)
	}
	if conn1.ID != node0.ID {
		t.Errorf("inbound conn id mismatch: got %v, want %v", conn1.ID, node0.ID)
	}
	if !reflect.DeepEqual(conn1.Caps, hs0.Caps) {
		t.Errorf("inbound caps mismatch: got %v, want %v", conn1.Caps, hs0.Caps)
	}

	<-done
}