mirror of https://github.com/ethereum/go-ethereum
commit
8305d409d2
@ -0,0 +1,93 @@ |
||||
/* |
||||
This file is part of go-ethereum |
||||
|
||||
go-ethereum is free software: you can redistribute it and/or modify |
||||
it under the terms of the GNU General Public License as published by |
||||
the Free Software Foundation, either version 3 of the License, or |
||||
(at your option) any later version. |
||||
|
||||
go-ethereum is distributed in the hope that it will be useful, |
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||
GNU General Public License for more details. |
||||
|
||||
You should have received a copy of the GNU General Public License |
||||
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/ |
||||
|
||||
// Command bootnode runs a bootstrap node for the Discovery Protocol.
|
||||
package main |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"encoding/hex" |
||||
"flag" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"log" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/logger" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/nat" |
||||
) |
||||
|
||||
// main parses the command line, configures logging, and runs a
// discovery bootstrap node until the process is killed.
func main() {
	var (
		listenAddr  = flag.String("addr", ":30301", "listen address")
		genKey      = flag.String("genkey", "", "generate a node key and quit")
		nodeKeyFile = flag.String("nodekey", "", "private key filename")
		nodeKeyHex  = flag.String("nodekeyhex", "", "private key as hex (for testing)")
		natdesc     = flag.String("nat", "none", "port mapping mechanism (any|none|upnp|pmp|extip:<IP>)")

		nodeKey *ecdsa.PrivateKey
		err     error
	)
	flag.Parse()
	logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.DebugLevel))

	// -genkey short-circuits everything else: write the key and exit.
	if *genKey != "" {
		writeKey(*genKey)
		os.Exit(0)
	}

	natm, err := nat.Parse(*natdesc)
	if err != nil {
		log.Fatalf("-nat: %v", err)
	}
	// Exactly one of -nodekey / -nodekeyhex must be given.
	switch {
	case *nodeKeyFile == "" && *nodeKeyHex == "":
		log.Fatal("Use -nodekey or -nodekeyhex to specify a private key")
	case *nodeKeyFile != "" && *nodeKeyHex != "":
		log.Fatal("Options -nodekey and -nodekeyhex are mutually exclusive")
	case *nodeKeyFile != "":
		if nodeKey, err = crypto.LoadECDSA(*nodeKeyFile); err != nil {
			log.Fatalf("-nodekey: %v", err)
		}
	case *nodeKeyHex != "":
		if nodeKey, err = crypto.HexToECDSA(*nodeKeyHex); err != nil {
			log.Fatalf("-nodekeyhex: %v", err)
		}
	}

	if _, err := discover.ListenUDP(nodeKey, *listenAddr, natm); err != nil {
		log.Fatal(err)
	}
	// Block forever; the discovery table runs in background goroutines.
	select {}
}
||||
|
||||
func writeKey(target string) { |
||||
key, err := crypto.GenerateKey() |
||||
if err != nil { |
||||
log.Fatal("could not generate key: %v", err) |
||||
} |
||||
b := crypto.FromECDSA(key) |
||||
if target == "-" { |
||||
fmt.Println(hex.EncodeToString(b)) |
||||
} else { |
||||
if err := ioutil.WriteFile(target, b, 0600); err != nil { |
||||
log.Fatal("write error: ", err) |
||||
} |
||||
} |
||||
} |
@ -1 +1,64 @@ |
||||
// Fixed: "_to" in the execute() input list was missing its opening quote,
// which made the array literal a syntax error.
var contract = web3.eth.contractFromAbi([{"constant":false,"inputs":[{"name":"_h","type":"hash256"}],"name":"confirm","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"},{"name":"_data","type":"bytes"}],"name":"execute","outputs":[{"name":"_r","type":"hash256"}],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"}],"name":"kill","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"}],"name":"changeOwner","outputs":[],"type":"function"},{"inputs":[{"indexed":false,"name":"value","type":"uint256"}],"name":"CashIn","type":"event"},{"inputs":[{"indexed":true,"name":"out","type":"string32"},{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"to","type":"address"}],"name":"SingleTransact","type":"event"},{"inputs":[{"indexed":true,"name":"out","type":"string32"},{"indexed":false,"name":"owner","type":"address"},{"indexed":false,"name":"operation","type":"hash256"},{"indexed":false,"name":"value","type":"uint256"},{"indexed":false,"name":"to","type":"address"}],"name":"MultiTransact","type":"event"}]);
||||
var contract = web3.eth.contractFromAbi([
  {
    "constant":false,
    "inputs":[
      {"name":"_h","type":"hash256"}
    ],
    "name":"confirm",
    "outputs":[],
    "type":"function"
  },{
    "constant":false,
    "inputs":[
      {"name":"_to","type":"address"}, // fixed: "_to" was unquoted (syntax error)
      {"name":"_value","type":"uint256"},
      {"name":"_data","type":"bytes"}
    ],
    "name":"execute",
    "outputs":[
      {"name":"_r","type":"hash256"}
    ],
    "type":"function"
  },{
    "constant":false,
    "inputs":[
      {"name":"_to","type":"address"}
    ],
    "name":"kill",
    "outputs":[],
    "type":"function"
  },{
    "constant":false,
    "inputs":[
      {"name":"_from","type":"address"},
      {"name":"_to","type":"address"}
    ],
    "name":"changeOwner",
    "outputs":[],
    "type":"function"
  },{
    "inputs":[
      {"indexed":false,"name":"value","type":"uint256"}
    ],
    "name":"CashIn",
    "type":"event"
  },{
    "inputs":[
      {"indexed":true,"name":"out","type":"string32"},
      {"indexed":false,"name":"owner","type":"address"},
      {"indexed":false,"name":"value","type":"uint256"},
      {"indexed":false,"name":"to","type":"address"}
    ],
    "name":"SingleTransact",
    "type":"event"
  },{
    "inputs":[
      {"indexed":true,"name":"out","type":"string32"},
      {"indexed":false,"name":"owner","type":"address"},
      {"indexed":false,"name":"operation","type":"hash256"},
      {"indexed":false,"name":"value","type":"uint256"},
      {"indexed":false,"name":"to","type":"address"}
    ],
    "name":"MultiTransact",
    "type":"event"
  }
]);
||||
|
@ -1,58 +0,0 @@ |
||||
/* |
||||
This file is part of go-ethereum |
||||
|
||||
go-ethereum is free software: you can redistribute it and/or modify |
||||
it under the terms of the GNU General Public License as published by |
||||
the Free Software Foundation, either version 3 of the License, or |
||||
(at your option) any later version. |
||||
|
||||
go-ethereum is distributed in the hope that it will be useful, |
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
||||
GNU General Public License for more details. |
||||
|
||||
You should have received a copy of the GNU General Public License |
||||
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/ |
||||
package main |
||||
|
||||
import ( |
||||
"crypto/elliptic" |
||||
"flag" |
||||
"log" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/logger" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
) |
||||
|
||||
var ( |
||||
natType = flag.String("nat", "", "NAT traversal implementation") |
||||
pmpGateway = flag.String("gateway", "", "gateway address for NAT-PMP") |
||||
listenAddr = flag.String("addr", ":30301", "listen address") |
||||
) |
||||
|
||||
func main() { |
||||
flag.Parse() |
||||
nat, err := p2p.ParseNAT(*natType, *pmpGateway) |
||||
if err != nil { |
||||
log.Fatal("invalid nat:", err) |
||||
} |
||||
|
||||
logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.InfoLevel)) |
||||
key, _ := crypto.GenerateKey() |
||||
marshaled := elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y) |
||||
|
||||
srv := p2p.Server{ |
||||
MaxPeers: 100, |
||||
Identity: p2p.NewSimpleClientIdentity("Ethereum(G)", "0.1", "Peer Server Two", marshaled), |
||||
ListenAddr: *listenAddr, |
||||
NAT: nat, |
||||
NoDial: true, |
||||
} |
||||
if err := srv.Start(); err != nil { |
||||
log.Fatal("could not start server:", err) |
||||
} |
||||
select {} |
||||
} |
@ -0,0 +1,63 @@ |
||||
package logger |
||||
|
||||
import ( |
||||
"io" |
||||
"log" |
||||
"sync/atomic" |
||||
) |
||||
|
||||
// LogSystem is implemented by log output devices.
// All methods can be called concurrently from multiple goroutines.
type LogSystem interface {
	// GetLogLevel returns the current verbosity threshold.
	GetLogLevel() LogLevel
	// SetLogLevel changes the verbosity threshold.
	SetLogLevel(i LogLevel)
	// LogPrint emits a single, already formatted message at the given level.
	LogPrint(LogLevel, string)
}
||||
|
||||
// NewStdLogSystem creates a LogSystem that prints to the given writer.
|
||||
// The flag values are defined package log.
|
||||
func NewStdLogSystem(writer io.Writer, flags int, level LogLevel) LogSystem { |
||||
logger := log.New(writer, "", flags) |
||||
return &stdLogSystem{logger, uint32(level)} |
||||
} |
||||
|
||||
// stdLogSystem writes messages through a standard library *log.Logger.
type stdLogSystem struct {
	logger *log.Logger
	level  uint32 // holds a LogLevel; accessed atomically only
}

// LogPrint writes msg unconditionally; level filtering is done by the
// dispatcher (see sysLoop), which consults GetLogLevel before calling.
func (t *stdLogSystem) LogPrint(level LogLevel, msg string) {
	t.logger.Print(msg)
}

// SetLogLevel atomically updates the verbosity threshold.
func (t *stdLogSystem) SetLogLevel(i LogLevel) {
	atomic.StoreUint32(&t.level, uint32(i))
}

// GetLogLevel atomically reads the verbosity threshold.
func (t *stdLogSystem) GetLogLevel() LogLevel {
	return LogLevel(atomic.LoadUint32(&t.level))
}
||||
|
||||
// NewRawLogSystem creates a LogSystem that prints to the given writer without
// adding extra information. Suitable for preformatted output.
// NOTE(review): the flags argument is accepted but ignored — the underlying
// logger is always created with flags 0. Confirm whether the parameter is
// kept only for signature parity with NewStdLogSystem.
func NewRawLogSystem(writer io.Writer, flags int, level LogLevel) LogSystem {
	logger := log.New(writer, "", 0)
	return &rawLogSystem{logger, uint32(level)}
}

// rawLogSystem mirrors stdLogSystem but receives special treatment from the
// dispatcher (see sysLoop): it is only handed JsonLevel messages.
type rawLogSystem struct {
	logger *log.Logger
	level  uint32 // holds a LogLevel; accessed atomically only
}

// LogPrint writes msg unconditionally; filtering happens in the dispatcher.
func (t *rawLogSystem) LogPrint(level LogLevel, msg string) {
	t.logger.Print(msg)
}

// SetLogLevel atomically updates the verbosity threshold.
func (t *rawLogSystem) SetLogLevel(i LogLevel) {
	atomic.StoreUint32(&t.level, uint32(i))
}

// GetLogLevel atomically reads the verbosity threshold.
func (t *rawLogSystem) GetLogLevel() LogLevel {
	return LogLevel(atomic.LoadUint32(&t.level))
}
@ -0,0 +1,112 @@ |
||||
package logger |
||||
|
||||
import ( |
||||
"sync" |
||||
) |
||||
|
||||
// message is a single log line tagged with its verbosity level.
type message struct {
	level LogLevel
	msg   string
}

// Channels used to communicate with the dispatchLoop goroutine, which
// owns all mutable dispatcher state.
var (
	logMessageC = make(chan message)       // incoming log messages
	addSystemC  = make(chan LogSystem)     // registers a new output system
	flushC      = make(chan chan struct{}) // Flush requests; reply channel is closed when done
	resetC      = make(chan chan struct{}) // Reset requests; reply channel is closed when done
)

func init() {
	// The dispatcher runs for the lifetime of the process.
	go dispatchLoop()
}

// each system can buffer this many messages before
// blocking incoming log messages.
const sysBufferSize = 500
||||
|
||||
// dispatchLoop is the central fan-out goroutine (started from init).
// It owns the list of registered log systems and their input channels;
// all mutation happens on this goroutine, so no locking is needed.
func dispatchLoop() {
	var (
		systems  []LogSystem    // every registered system
		systemIn []chan message // per-system buffered input channel (parallel to systems)
		systemWG sync.WaitGroup // counts live sysLoop goroutines
	)
	// bootSystem starts a delivery goroutine for sys with a fresh
	// buffered input channel.
	bootSystem := func(sys LogSystem) {
		in := make(chan message, sysBufferSize)
		systemIn = append(systemIn, in)
		systemWG.Add(1)
		go sysLoop(sys, in, &systemWG)
	}

	for {
		select {
		case msg := <-logMessageC:
			// Fan the message out to every system; this blocks when a
			// system's buffer (sysBufferSize) is full.
			for _, c := range systemIn {
				c <- msg
			}

		case sys := <-addSystemC:
			systems = append(systems, sys)
			bootSystem(sys)

		case waiter := <-resetC:
			// reset means terminate all systems
			for _, c := range systemIn {
				close(c)
			}
			systems = nil
			systemIn = nil
			systemWG.Wait() // wait until every sysLoop has drained and exited
			close(waiter)   // signal Reset that delivery is complete

		case waiter := <-flushC:
			// flush means reboot all systems
			for _, c := range systemIn {
				close(c)
			}
			systemIn = nil
			systemWG.Wait() // all buffered messages are now delivered
			for _, sys := range systems {
				bootSystem(sys) // restart each system with a fresh channel
			}
			close(waiter) // signal Flush that delivery is complete
		}
	}
}
||||
|
||||
// sysLoop delivers messages from in to a single log system until the
// channel is closed by the dispatcher, then marks wg done.
func sysLoop(sys LogSystem, in <-chan message, wg *sync.WaitGroup) {
	for msg := range in {
		switch sys.(type) {
		case *rawLogSystem:
			// This is a semantic hack since rawLogSystem has little to do with JsonLevel
			if msg.level == JsonLevel {
				sys.LogPrint(msg.level, msg.msg)
			}
		default:
			// Ordinary systems filter by their configured verbosity.
			if sys.GetLogLevel() >= msg.level {
				sys.LogPrint(msg.level, msg.msg)
			}
		}
	}
	wg.Done()
}
||||
|
||||
// Reset removes all active log systems.
|
||||
// It blocks until all current messages have been delivered.
|
||||
func Reset() { |
||||
waiter := make(chan struct{}) |
||||
resetC <- waiter |
||||
<-waiter |
||||
} |
||||
|
||||
// Flush waits until all current log messages have been dispatched to
|
||||
// the active log systems.
|
||||
func Flush() { |
||||
waiter := make(chan struct{}) |
||||
flushC <- waiter |
||||
<-waiter |
||||
} |
||||
|
||||
// AddLogSystem starts printing messages to the given LogSystem.
// Registration is handed off to the dispatcher goroutine via a channel.
func AddLogSystem(sys LogSystem) {
	addSystemC <- sys
}
@ -0,0 +1,360 @@ |
||||
package logger |
||||
|
||||
import ( |
||||
"time" |
||||
) |
||||
|
||||
// utctime8601 marshals as the current UTC time in ISO-8601 form with
// microsecond precision, e.g. "2015-02-17T09:30:12.000001Z".
type utctime8601 struct{}

// MarshalJSON implements json.Marshaler.
func (utctime8601) MarshalJSON() ([]byte, error) {
	// Use a fixed-width layout instead of slicing RFC3339Nano output:
	// RFC3339Nano trims trailing fractional zeros, so the previous
	// [:26] slice could cut into the offset or panic on short strings.
	return []byte(`"` + time.Now().UTC().Format("2006-01-02T15:04:05.000000") + `Z"`), nil
}
||||
|
||||
// JsonLog is implemented by all structured (JSON) log events.
// EventName returns the dotted event identifier, e.g. "eth.tx.received".
type JsonLog interface {
	EventName() string
}

// LogEvent carries the fields common to every JSON log event and is
// embedded in each concrete event type.
type LogEvent struct {
	Guid string      `json:"guid"`
	Ts   utctime8601 `json:"ts"`
	// Level string `json:"level"`
}

// LogStarting is the payload of the "starting" event, carrying client
// and protocol version information.
type LogStarting struct {
	ClientString    string `json:"version_string"`
	Coinbase        string `json:"coinbase"`
	ProtocolVersion int    `json:"eth_version"`
	LogEvent
}

// EventName implements JsonLog.
func (l *LogStarting) EventName() string {
	return "starting"
}
||||
|
||||
// P2PConnecting is the payload of the "p2p.connecting" event.
type P2PConnecting struct {
	RemoteId       string `json:"remote_id"`
	RemoteEndpoint string `json:"remote_endpoint"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PConnecting) EventName() string {
	return "p2p.connecting"
}

// P2PConnected is the payload of the "p2p.connected" event.
type P2PConnected struct {
	NumConnections int    `json:"num_connections"`
	RemoteId       string `json:"remote_id"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PConnected) EventName() string {
	return "p2p.connected"
}

// P2PHandshaked is the payload of the "p2p.handshaked" event.
type P2PHandshaked struct {
	RemoteCapabilities []string `json:"remote_capabilities"`
	RemoteId           string   `json:"remote_id"`
	NumConnections     int      `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PHandshaked) EventName() string {
	return "p2p.handshaked"
}

// P2PDisconnected is the payload of the "p2p.disconnected" event.
type P2PDisconnected struct {
	NumConnections int    `json:"num_connections"`
	RemoteId       string `json:"remote_id"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnected) EventName() string {
	return "p2p.disconnected"
}

// P2PDisconnecting is the payload of the "p2p.disconnecting" event.
type P2PDisconnecting struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnecting) EventName() string {
	return "p2p.disconnecting"
}

// P2PDisconnectingBadHandshake is the payload of the
// "p2p.disconnecting.bad_handshake" event.
type P2PDisconnectingBadHandshake struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnectingBadHandshake) EventName() string {
	return "p2p.disconnecting.bad_handshake"
}

// P2PDisconnectingBadProtocol is the payload of the
// "p2p.disconnecting.bad_protocol" event.
type P2PDisconnectingBadProtocol struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnectingBadProtocol) EventName() string {
	return "p2p.disconnecting.bad_protocol"
}

// P2PDisconnectingReputation is the payload of the
// "p2p.disconnecting.reputation" event.
type P2PDisconnectingReputation struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnectingReputation) EventName() string {
	return "p2p.disconnecting.reputation"
}

// P2PDisconnectingDHT is the payload of the "p2p.disconnecting.dht" event.
type P2PDisconnectingDHT struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PDisconnectingDHT) EventName() string {
	return "p2p.disconnecting.dht"
}

// P2PEthDisconnectingBadBlock is the payload of the
// "p2p.eth.disconnecting.bad_block" event.
type P2PEthDisconnectingBadBlock struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PEthDisconnectingBadBlock) EventName() string {
	return "p2p.eth.disconnecting.bad_block"
}

// P2PEthDisconnectingBadTx is the payload of the
// "p2p.eth.disconnecting.bad_tx" event.
type P2PEthDisconnectingBadTx struct {
	Reason         string `json:"reason"`
	RemoteId       string `json:"remote_id"`
	NumConnections int    `json:"num_connections"`
	LogEvent
}

// EventName implements JsonLog.
func (l *P2PEthDisconnectingBadTx) EventName() string {
	return "p2p.eth.disconnecting.bad_tx"
}
||||
|
||||
// EthNewBlockMined is the payload of the "eth.newblock.mined" event.
// It is the only newblock event that also carries the full block RLP.
type EthNewBlockMined struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockHexRlp     string `json:"block_hexrlp"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockMined) EventName() string {
	return "eth.newblock.mined"
}

// EthNewBlockBroadcasted is the payload of the "eth.newblock.broadcasted" event.
type EthNewBlockBroadcasted struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockBroadcasted) EventName() string {
	return "eth.newblock.broadcasted"
}

// EthNewBlockReceived is the payload of the "eth.newblock.received" event.
type EthNewBlockReceived struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockReceived) EventName() string {
	return "eth.newblock.received"
}

// EthNewBlockIsKnown is the payload of the "eth.newblock.is_known" event.
type EthNewBlockIsKnown struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockIsKnown) EventName() string {
	return "eth.newblock.is_known"
}

// EthNewBlockIsNew is the payload of the "eth.newblock.is_new" event.
type EthNewBlockIsNew struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockIsNew) EventName() string {
	return "eth.newblock.is_new"
}

// EthNewBlockMissingParent is the payload of the
// "eth.newblock.missing_parent" event.
type EthNewBlockMissingParent struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockMissingParent) EventName() string {
	return "eth.newblock.missing_parent"
}

// EthNewBlockIsInvalid is the payload of the "eth.newblock.is_invalid" event.
type EthNewBlockIsInvalid struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockIsInvalid) EventName() string {
	return "eth.newblock.is_invalid"
}

// EthNewBlockChainIsOlder is the payload of the
// "eth.newblock.chain.is_older" event.
type EthNewBlockChainIsOlder struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockChainIsOlder) EventName() string {
	return "eth.newblock.chain.is_older"
}

// EthNewBlockChainIsCanonical is the payload of the
// "eth.newblock.chain.is_cannonical" event.
// NOTE(review): the event-name string misspells "canonical"; it is part
// of the emitted wire format, so do not fix it without coordinating
// with downstream consumers.
type EthNewBlockChainIsCanonical struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockChainIsCanonical) EventName() string {
	return "eth.newblock.chain.is_cannonical"
}

// EthNewBlockChainNotCanonical is the payload of the
// "eth.newblock.chain.not_cannonical" event (same misspelling note as above).
type EthNewBlockChainNotCanonical struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockChainNotCanonical) EventName() string {
	return "eth.newblock.chain.not_cannonical"
}

// EthNewBlockChainSwitched is the payload of the
// "eth.newblock.chain.switched" event; it additionally records the
// previous head hash.
type EthNewBlockChainSwitched struct {
	BlockNumber     int    `json:"block_number"`
	HeadHash        string `json:"head_hash"`
	OldHeadHash     string `json:"old_head_hash"`
	BlockHash       string `json:"block_hash"`
	BlockDifficulty int    `json:"block_difficulty"`
	BlockPrevHash   string `json:"block_prev_hash"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthNewBlockChainSwitched) EventName() string {
	return "eth.newblock.chain.switched"
}
||||
|
||||
// EthTxCreated is the payload of the "eth.tx.created" event.
type EthTxCreated struct {
	TxHash    string `json:"tx_hash"`
	TxSender  string `json:"tx_sender"`
	TxAddress string `json:"tx_address"`
	TxHexRLP  string `json:"tx_hexrlp"`
	TxNonce   int    `json:"tx_nonce"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthTxCreated) EventName() string {
	return "eth.tx.created"
}

// EthTxReceived is the payload of the "eth.tx.received" event; it also
// records which remote peer delivered the transaction.
type EthTxReceived struct {
	TxHash    string `json:"tx_hash"`
	TxAddress string `json:"tx_address"`
	TxHexRLP  string `json:"tx_hexrlp"`
	RemoteId  string `json:"remote_id"`
	TxNonce   int    `json:"tx_nonce"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthTxReceived) EventName() string {
	return "eth.tx.received"
}

// EthTxBroadcasted is the payload of the "eth.tx.broadcasted" event.
type EthTxBroadcasted struct {
	TxHash    string `json:"tx_hash"`
	TxSender  string `json:"tx_sender"`
	TxAddress string `json:"tx_address"`
	TxNonce   int    `json:"tx_nonce"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthTxBroadcasted) EventName() string {
	return "eth.tx.broadcasted"
}

// EthTxValidated is the payload of the "eth.tx.validated" event.
type EthTxValidated struct {
	TxHash    string `json:"tx_hash"`
	TxSender  string `json:"tx_sender"`
	TxAddress string `json:"tx_address"`
	TxNonce   int    `json:"tx_nonce"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthTxValidated) EventName() string {
	return "eth.tx.validated"
}

// EthTxIsInvalid is the payload of the "eth.tx.is_invalid" event,
// carrying the rejection reason.
type EthTxIsInvalid struct {
	TxHash    string `json:"tx_hash"`
	TxSender  string `json:"tx_sender"`
	TxAddress string `json:"tx_address"`
	Reason    string `json:"reason"`
	TxNonce   int    `json:"tx_nonce"`
	LogEvent
}

// EventName implements JsonLog.
func (l *EthTxIsInvalid) EventName() string {
	return "eth.tx.is_invalid"
}
@ -1,63 +0,0 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"fmt" |
||||
"runtime" |
||||
) |
||||
|
||||
// ClientIdentity represents the identity of a peer.
type ClientIdentity interface {
	String() string // human readable identity
	Pubkey() []byte // 512-bit public key
}
||||
|
||||
// SimpleClientIdentity is a plain-field implementation of ClientIdentity.
type SimpleClientIdentity struct {
	clientIdentifier string
	version          string
	customIdentifier string
	os               string
	implementation   string
	pubkey           []byte
}

// NewSimpleClientIdentity builds an identity from the client name,
// version, optional custom identifier and public key. The OS and Go
// runtime version fields are filled in automatically.
func NewSimpleClientIdentity(clientIdentifier string, version string, customIdentifier string, pubkey []byte) *SimpleClientIdentity {
	return &SimpleClientIdentity{
		clientIdentifier: clientIdentifier,
		version:          version,
		customIdentifier: customIdentifier,
		os:               runtime.GOOS,
		implementation:   runtime.Version(),
		pubkey:           pubkey,
	}
}

func (c *SimpleClientIdentity) init() {
}

// String renders the identity as "client/vVERSION[/custom]/os/runtime".
func (c *SimpleClientIdentity) String() string {
	custom := ""
	if len(c.customIdentifier) > 0 {
		custom = "/" + c.customIdentifier
	}
	return fmt.Sprintf("%s/v%s%s/%s/%s",
		c.clientIdentifier,
		c.version,
		custom,
		c.os,
		c.implementation)
}

// Pubkey returns the identity's public key bytes.
func (c *SimpleClientIdentity) Pubkey() []byte {
	return []byte(c.pubkey)
}

// SetCustomIdentifier replaces the custom identifier component.
func (c *SimpleClientIdentity) SetCustomIdentifier(customIdentifier string) {
	c.customIdentifier = customIdentifier
}

// GetCustomIdentifier returns the custom identifier component.
func (c *SimpleClientIdentity) GetCustomIdentifier() string {
	return c.customIdentifier
}
@ -1,30 +0,0 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"fmt" |
||||
"runtime" |
||||
"testing" |
||||
) |
||||
|
||||
func TestClientIdentity(t *testing.T) { |
||||
clientIdentity := NewSimpleClientIdentity("Ethereum(G)", "0.5.16", "test", []byte("pubkey")) |
||||
clientString := clientIdentity.String() |
||||
expected := fmt.Sprintf("Ethereum(G)/v0.5.16/test/%s/%s", runtime.GOOS, runtime.Version()) |
||||
if clientString != expected { |
||||
t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString) |
||||
} |
||||
customIdentifier := clientIdentity.GetCustomIdentifier() |
||||
if customIdentifier != "test" { |
||||
t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test', got %v", customIdentifier) |
||||
} |
||||
clientIdentity.SetCustomIdentifier("test2") |
||||
customIdentifier = clientIdentity.GetCustomIdentifier() |
||||
if customIdentifier != "test2" { |
||||
t.Errorf("Expected clientIdentity.GetCustomIdentifier() to be 'test2', got %v", customIdentifier) |
||||
} |
||||
clientString = clientIdentity.String() |
||||
expected = fmt.Sprintf("Ethereum(G)/v0.5.16/test2/%s/%s", runtime.GOOS, runtime.Version()) |
||||
if clientString != expected { |
||||
t.Errorf("Expected clientIdentity to be %v, got %v", expected, clientString) |
||||
} |
||||
} |
@ -0,0 +1,363 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
// "binary"
|
||||
"crypto/ecdsa" |
||||
"crypto/rand" |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1" |
||||
ethlogger "github.com/ethereum/go-ethereum/logger" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/obscuren/ecies" |
||||
) |
||||
|
||||
// clogger logs handshake internals at debug level.
var clogger = ethlogger.NewLogger("CRYPTOID")

const (
	sskLen = 16 // ecies.MaxSharedKeyLength(pubKey) / 2
	sigLen = 65 // elliptic S256
	pubLen = 64 // 512 bit pubkey in uncompressed representation without format byte
	shaLen = 32 // hash length (for nonce etc)

	// plaintext sizes: signature || pubkey-hash || pubkey || nonce || flag
	authMsgLen  = sigLen + shaLen + pubLen + shaLen + 1
	authRespLen = pubLen + shaLen + 1

	// eciesBytes is the fixed overhead ECIES encryption adds to a
	// message (65 + 16 + 32 — presumably ephemeral pubkey + IV + MAC;
	// confirm against the ecies package).
	eciesBytes = 65 + 16 + 32
	iHSLen     = authMsgLen + eciesBytes  // size of the final ECIES payload sent as initiator's handshake
	rHSLen     = authRespLen + eciesBytes // size of the final ECIES payload sent as receiver's handshake
)
||||
|
||||
// hexkey wraps a byte slice so that debug logging renders it as
// "(<length>) <hex digits>".
type hexkey []byte

// String implements fmt.Stringer.
func (h hexkey) String() string {
	return fmt.Sprintf("(%d) %x", len(h), []byte(h))
}
||||
|
||||
// encHandshake negotiates a session token on conn.
// A nil dial means the connection is inbound: the remote identity is
// learned from the handshake itself. Otherwise we are the initiator
// and dial supplies the expected remote node ID. In both cases no
// previous session token is supplied (the final argument is nil).
func encHandshake(conn io.ReadWriter, prv *ecdsa.PrivateKey, dial *discover.Node) (
	remoteID discover.NodeID,
	sessionToken []byte,
	err error,
) {
	if dial == nil {
		var remotePubkey []byte
		sessionToken, remotePubkey, err = inboundEncHandshake(conn, prv, nil)
		copy(remoteID[:], remotePubkey)
	} else {
		remoteID = dial.ID
		sessionToken, err = outboundEncHandshake(conn, prv, remoteID[:], nil)
	}
	return remoteID, sessionToken, err
}
||||
|
||||
// outboundEncHandshake negotiates a session token on conn.
// it should be called on the dialing side of the connection.
//
// prvKey is the local client's private key;
// remotePublicKey is the remote peer's node ID;
// sessionToken is the token from a previous session with this node.
func outboundEncHandshake(conn io.ReadWriter, prvKey *ecdsa.PrivateKey, remotePublicKey []byte, sessionToken []byte) (
	newSessionToken []byte,
	err error,
) {
	// Build the initiator's authentication message; this also yields our
	// nonce and the ephemeral ("random") keypair used for signing.
	auth, initNonce, randomPrivKey, err := authMsg(prvKey, remotePublicKey, sessionToken)
	if err != nil {
		return nil, err
	}
	if sessionToken != nil {
		clogger.Debugf("session-token: %v", hexkey(sessionToken))
	}

	clogger.Debugf("initiator-nonce: %v", hexkey(initNonce))
	clogger.Debugf("initiator-random-private-key: %v", hexkey(crypto.FromECDSA(randomPrivKey)))
	randomPublicKeyS, _ := exportPublicKey(&randomPrivKey.PublicKey)
	clogger.Debugf("initiator-random-public-key: %v", hexkey(randomPublicKeyS))
	// Send our handshake, then read the receiver's fixed-size response.
	if _, err = conn.Write(auth); err != nil {
		return nil, err
	}
	clogger.Debugf("initiator handshake: %v", hexkey(auth))

	response := make([]byte, rHSLen)
	if _, err = io.ReadFull(conn, response); err != nil {
		return nil, err
	}
	recNonce, remoteRandomPubKey, _, err := completeHandshake(response, prvKey)
	if err != nil {
		return nil, err
	}

	clogger.Debugf("receiver-nonce: %v", hexkey(recNonce))
	remoteRandomPubKeyS, _ := exportPublicKey(remoteRandomPubKey)
	clogger.Debugf("receiver-random-public-key: %v", hexkey(remoteRandomPubKeyS))
	// Derive the new session token from both nonces and the ephemeral keys.
	return newSession(initNonce, recNonce, randomPrivKey, remoteRandomPubKey)
}
||||
|
||||
// authMsg creates the initiator handshake:
//
//	E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0)
//	E(remote-pubk, S(ecdhe-random, token^nonce)              || H(ecdhe-random-pubk) || pubk || nonce || 0x1)
//
// It returns the encrypted message, the initiator nonce and the
// ephemeral ("random") private key used for the signature.
func authMsg(prvKey *ecdsa.PrivateKey, remotePubKeyS, sessionToken []byte) (
	auth, initNonce []byte,
	randomPrvKey *ecdsa.PrivateKey,
	err error,
) {
	// session init, common to both parties
	remotePubKey, err := importPublicKey(remotePubKeyS)
	if err != nil {
		return
	}

	var tokenFlag byte // = 0x00
	if sessionToken == nil {
		// no session token found means we need to generate shared secret.
		// ecies shared secret is used as initial session token for new peers
		// generate shared key from prv and remote pubkey
		if sessionToken, err = ecies.ImportECDSA(prvKey).GenerateShared(ecies.ImportECDSAPublic(remotePubKey), sskLen, sskLen); err != nil {
			return
		}
		// tokenFlag = 0x00 // redundant
	} else {
		// for known peers, we use stored token from the previous session
		tokenFlag = 0x01
	}

	// allocate msgLen long message
	var msg []byte = make([]byte, authMsgLen)
	// the nonce occupies the slot just before the trailing flag byte
	initNonce = msg[authMsgLen-shaLen-1 : authMsgLen-1]
	if _, err = rand.Read(initNonce); err != nil {
		return
	}
	// create known message:
	// ecdh-shared-secret^nonce for new peers
	// token^nonce for old peers
	var sharedSecret = xor(sessionToken, initNonce)

	// generate random keypair to use for signing
	if randomPrvKey, err = crypto.GenerateKey(); err != nil {
		return
	}
	// sign shared secret (message known to both parties): shared-secret
	var signature []byte
	// signature = sign(ecdhe-random, shared-secret)
	// uses secp256k1.Sign
	if signature, err = crypto.Sign(sharedSecret, randomPrvKey); err != nil {
		return
	}

	// message layout:
	// signed-shared-secret || H(ecdhe-random-pubk) || pubk || nonce || tokenFlag
	copy(msg, signature) // copy signed-shared-secret
	// H(ecdhe-random-pubk)
	var randomPubKey64 []byte
	if randomPubKey64, err = exportPublicKey(&randomPrvKey.PublicKey); err != nil {
		return
	}
	var pubKey64 []byte
	if pubKey64, err = exportPublicKey(&prvKey.PublicKey); err != nil {
		return
	}
	copy(msg[sigLen:sigLen+shaLen], crypto.Sha3(randomPubKey64))
	// pubkey copied to the correct segment.
	copy(msg[sigLen+shaLen:sigLen+shaLen+pubLen], pubKey64)
	// nonce is already in the slice
	// stick tokenFlag byte to the end
	msg[authMsgLen-1] = tokenFlag

	// encrypt using remote-pubk
	// auth = eciesEncrypt(remote-pubk, msg)
	if auth, err = crypto.Encrypt(remotePubKey, msg); err != nil {
		return
	}
	return
}
||||
|
||||
// completeHandshake is called when the initiator receives an
// authentication response (aka receiver handshake). It completes the
// handshake by reading off parameters the remote peer provides needed
// to set up the secure session.
//
// The response plaintext layout (mirroring authResp) is:
// ecdhe-random-pubk (pubLen) || nonce (shaLen) || token-flag (1 byte).
func completeHandshake(auth []byte, prvKey *ecdsa.PrivateKey) (
	respNonce []byte,
	remoteRandomPubKey *ecdsa.PublicKey,
	tokenFlag bool,
	err error,
) {
	var msg []byte
	// they prove that msg is meant for me,
	// I prove I possess private key if i can read it
	if msg, err = crypto.Decrypt(prvKey, auth); err != nil {
		return
	}

	// receiver nonce follows the ephemeral public key in the plaintext
	respNonce = msg[pubLen : pubLen+shaLen]
	var remoteRandomPubKeyS = msg[:pubLen]
	if remoteRandomPubKey, err = importPublicKey(remoteRandomPubKeyS); err != nil {
		return
	}
	// last byte signals whether the receiver used a stored session token
	if msg[authRespLen-1] == 0x01 {
		tokenFlag = true
	}
	return
}
||||
|
||||
// inboundEncHandshake negotiates a session token on conn.
// It should be called on the listening side of the connection.
//
// prvKey is the local client's private key;
// sessionToken is the token from a previous session with this node
// (nil for a previously unknown peer).
// It returns the newly negotiated session token and the remote peer's
// public key (64-byte form, as extracted from the auth message).
func inboundEncHandshake(conn io.ReadWriter, prvKey *ecdsa.PrivateKey, sessionToken []byte) (
	token, remotePubKey []byte,
	err error,
) {
	// we are listening connection. we are responders in the
	// handshake. Extract info from the authentication. The initiator
	// starts by sending us a handshake that we need to respond to. so
	// we read auth message first, then respond.
	auth := make([]byte, iHSLen)
	if _, err := io.ReadFull(conn, auth); err != nil {
		return nil, nil, err
	}
	response, recNonce, initNonce, remotePubKey, randomPrivKey, remoteRandomPubKey, err := authResp(auth, sessionToken, prvKey)
	if err != nil {
		return nil, nil, err
	}
	clogger.Debugf("receiver-nonce: %v", hexkey(recNonce))
	clogger.Debugf("receiver-random-priv-key: %v", hexkey(crypto.FromECDSA(randomPrivKey)))
	// send our response before deriving the session token; the
	// initiator needs it to run completeHandshake on its side.
	if _, err = conn.Write(response); err != nil {
		return nil, nil, err
	}
	clogger.Debugf("receiver handshake:\n%v", hexkey(response))
	token, err = newSession(initNonce, recNonce, randomPrivKey, remoteRandomPubKey)
	return token, remotePubKey, err
}
||||
|
||||
// authResp is called by peer if it accepted (but not
|
||||
// initiated) the connection from the remote. It is passed the initiator
|
||||
// handshake received and the session token belonging to the
|
||||
// remote initiator.
|
||||
//
|
||||
// The first return value is the authentication response (aka receiver
|
||||
// handshake) that is to be sent to the remote initiator.
|
||||
func authResp(auth, sessionToken []byte, prvKey *ecdsa.PrivateKey) ( |
||||
authResp, respNonce, initNonce, remotePubKeyS []byte, |
||||
randomPrivKey *ecdsa.PrivateKey, |
||||
remoteRandomPubKey *ecdsa.PublicKey, |
||||
err error, |
||||
) { |
||||
// they prove that msg is meant for me,
|
||||
// I prove I possess private key if i can read it
|
||||
msg, err := crypto.Decrypt(prvKey, auth) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
remotePubKeyS = msg[sigLen+shaLen : sigLen+shaLen+pubLen] |
||||
remotePubKey, _ := importPublicKey(remotePubKeyS) |
||||
|
||||
var tokenFlag byte |
||||
if sessionToken == nil { |
||||
// no session token found means we need to generate shared secret.
|
||||
// ecies shared secret is used as initial session token for new peers
|
||||
// generate shared key from prv and remote pubkey
|
||||
if sessionToken, err = ecies.ImportECDSA(prvKey).GenerateShared(ecies.ImportECDSAPublic(remotePubKey), sskLen, sskLen); err != nil { |
||||
return |
||||
} |
||||
// tokenFlag = 0x00 // redundant
|
||||
} else { |
||||
// for known peers, we use stored token from the previous session
|
||||
tokenFlag = 0x01 |
||||
} |
||||
|
||||
// the initiator nonce is read off the end of the message
|
||||
initNonce = msg[authMsgLen-shaLen-1 : authMsgLen-1] |
||||
// I prove that i own prv key (to derive shared secret, and read
|
||||
// nonce off encrypted msg) and that I own shared secret they
|
||||
// prove they own the private key belonging to ecdhe-random-pubk
|
||||
// we can now reconstruct the signed message and recover the peers
|
||||
// pubkey
|
||||
var signedMsg = xor(sessionToken, initNonce) |
||||
var remoteRandomPubKeyS []byte |
||||
if remoteRandomPubKeyS, err = secp256k1.RecoverPubkey(signedMsg, msg[:sigLen]); err != nil { |
||||
return |
||||
} |
||||
// convert to ECDSA standard
|
||||
if remoteRandomPubKey, err = importPublicKey(remoteRandomPubKeyS); err != nil { |
||||
return |
||||
} |
||||
|
||||
// now we find ourselves a long task too, fill it random
|
||||
var resp = make([]byte, authRespLen) |
||||
// generate shaLen long nonce
|
||||
respNonce = resp[pubLen : pubLen+shaLen] |
||||
if _, err = rand.Read(respNonce); err != nil { |
||||
return |
||||
} |
||||
// generate random keypair for session
|
||||
if randomPrivKey, err = crypto.GenerateKey(); err != nil { |
||||
return |
||||
} |
||||
// responder auth message
|
||||
// E(remote-pubk, ecdhe-random-pubk || nonce || 0x0)
|
||||
var randomPubKeyS []byte |
||||
if randomPubKeyS, err = exportPublicKey(&randomPrivKey.PublicKey); err != nil { |
||||
return |
||||
} |
||||
copy(resp[:pubLen], randomPubKeyS) |
||||
// nonce is already in the slice
|
||||
resp[authRespLen-1] = tokenFlag |
||||
|
||||
// encrypt using remote-pubk
|
||||
// auth = eciesEncrypt(remote-pubk, msg)
|
||||
// why not encrypt with ecdhe-random-remote
|
||||
if authResp, err = crypto.Encrypt(remotePubKey, resp); err != nil { |
||||
return |
||||
} |
||||
return |
||||
} |
||||
|
||||
// newSession is called after the handshake is completed. The
|
||||
// arguments are values negotiated in the handshake. The return value
|
||||
// is a new session Token to be remembered for the next time we
|
||||
// connect with this peer.
|
||||
func newSession(initNonce, respNonce []byte, privKey *ecdsa.PrivateKey, remoteRandomPubKey *ecdsa.PublicKey) ([]byte, error) { |
||||
// 3) Now we can trust ecdhe-random-pubk to derive new keys
|
||||
//ecdhe-shared-secret = ecdh.agree(ecdhe-random, remote-ecdhe-random-pubk)
|
||||
pubKey := ecies.ImportECDSAPublic(remoteRandomPubKey) |
||||
dhSharedSecret, err := ecies.ImportECDSA(privKey).GenerateShared(pubKey, sskLen, sskLen) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
sharedSecret := crypto.Sha3(dhSharedSecret, crypto.Sha3(respNonce, initNonce)) |
||||
sessionToken := crypto.Sha3(sharedSecret) |
||||
return sessionToken, nil |
||||
} |
||||
|
||||
// importPublicKey unmarshals 512 bit public keys.
|
||||
func importPublicKey(pubKey []byte) (pubKeyEC *ecdsa.PublicKey, err error) { |
||||
var pubKey65 []byte |
||||
switch len(pubKey) { |
||||
case 64: |
||||
// add 'uncompressed key' flag
|
||||
pubKey65 = append([]byte{0x04}, pubKey...) |
||||
case 65: |
||||
pubKey65 = pubKey |
||||
default: |
||||
return nil, fmt.Errorf("invalid public key length %v (expect 64/65)", len(pubKey)) |
||||
} |
||||
return crypto.ToECDSAPub(pubKey65), nil |
||||
} |
||||
|
||||
func exportPublicKey(pubKeyEC *ecdsa.PublicKey) (pubKey []byte, err error) { |
||||
if pubKeyEC == nil { |
||||
return nil, fmt.Errorf("no ECDSA public key given") |
||||
} |
||||
return crypto.FromECDSAPub(pubKeyEC)[1:], nil |
||||
} |
||||
|
||||
// xor returns the byte-wise XOR of one and other. The result has the
// length of one; other must be at least as long, or the access panics.
func xor(one, other []byte) (xor []byte) {
	xor = make([]byte, len(one))
	for i, b := range one {
		xor[i] = b ^ other[i]
	}
	return xor
}
@ -0,0 +1,167 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/ecdsa" |
||||
"crypto/rand" |
||||
"net" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/obscuren/ecies" |
||||
) |
||||
|
||||
func TestPublicKeyEncoding(t *testing.T) { |
||||
prv0, _ := crypto.GenerateKey() // = ecdsa.GenerateKey(crypto.S256(), rand.Reader)
|
||||
pub0 := &prv0.PublicKey |
||||
pub0s := crypto.FromECDSAPub(pub0) |
||||
pub1, err := importPublicKey(pub0s) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
eciesPub1 := ecies.ImportECDSAPublic(pub1) |
||||
if eciesPub1 == nil { |
||||
t.Errorf("invalid ecdsa public key") |
||||
} |
||||
pub1s, err := exportPublicKey(pub1) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
if len(pub1s) != 64 { |
||||
t.Errorf("wrong length expect 64, got", len(pub1s)) |
||||
} |
||||
pub2, err := importPublicKey(pub1s) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
pub2s, err := exportPublicKey(pub2) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
if !bytes.Equal(pub1s, pub2s) { |
||||
t.Errorf("exports dont match") |
||||
} |
||||
pub2sEC := crypto.FromECDSAPub(pub2) |
||||
if !bytes.Equal(pub0s, pub2sEC) { |
||||
t.Errorf("exports dont match") |
||||
} |
||||
} |
||||
|
||||
func TestSharedSecret(t *testing.T) { |
||||
prv0, _ := crypto.GenerateKey() // = ecdsa.GenerateKey(crypto.S256(), rand.Reader)
|
||||
pub0 := &prv0.PublicKey |
||||
prv1, _ := crypto.GenerateKey() |
||||
pub1 := &prv1.PublicKey |
||||
|
||||
ss0, err := ecies.ImportECDSA(prv0).GenerateShared(ecies.ImportECDSAPublic(pub1), sskLen, sskLen) |
||||
if err != nil { |
||||
return |
||||
} |
||||
ss1, err := ecies.ImportECDSA(prv1).GenerateShared(ecies.ImportECDSAPublic(pub0), sskLen, sskLen) |
||||
if err != nil { |
||||
return |
||||
} |
||||
t.Logf("Secret:\n%v %x\n%v %x", len(ss0), ss0, len(ss0), ss1) |
||||
if !bytes.Equal(ss0, ss1) { |
||||
t.Errorf("dont match :(") |
||||
} |
||||
} |
||||
|
||||
// TestCryptoHandshake runs the in-memory handshake simulation between
// two fresh keys with no prior session token.
func TestCryptoHandshake(t *testing.T) {
	testCryptoHandshake(newkey(), newkey(), nil, t)
}
||||
|
||||
func TestCryptoHandshakeWithToken(t *testing.T) { |
||||
sessionToken := make([]byte, shaLen) |
||||
rand.Read(sessionToken) |
||||
testCryptoHandshake(newkey(), newkey(), sessionToken, t) |
||||
} |
||||
|
||||
func testCryptoHandshake(prv0, prv1 *ecdsa.PrivateKey, sessionToken []byte, t *testing.T) { |
||||
var err error |
||||
// pub0 := &prv0.PublicKey
|
||||
pub1 := &prv1.PublicKey |
||||
|
||||
// pub0s := crypto.FromECDSAPub(pub0)
|
||||
pub1s := crypto.FromECDSAPub(pub1) |
||||
|
||||
// simulate handshake by feeding output to input
|
||||
// initiator sends handshake 'auth'
|
||||
auth, initNonce, randomPrivKey, err := authMsg(prv0, pub1s, sessionToken) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
t.Logf("-> %v", hexkey(auth)) |
||||
|
||||
// receiver reads auth and responds with response
|
||||
response, remoteRecNonce, remoteInitNonce, _, remoteRandomPrivKey, remoteInitRandomPubKey, err := authResp(auth, sessionToken, prv1) |
||||
if err != nil { |
||||
t.Errorf("%v", err) |
||||
} |
||||
t.Logf("<- %v\n", hexkey(response)) |
||||
|
||||
// initiator reads receiver's response and the key exchange completes
|
||||
recNonce, remoteRandomPubKey, _, err := completeHandshake(response, prv0) |
||||
if err != nil { |
||||
t.Errorf("completeHandshake error: %v", err) |
||||
} |
||||
|
||||
// now both parties should have the same session parameters
|
||||
initSessionToken, err := newSession(initNonce, recNonce, randomPrivKey, remoteRandomPubKey) |
||||
if err != nil { |
||||
t.Errorf("newSession error: %v", err) |
||||
} |
||||
|
||||
recSessionToken, err := newSession(remoteInitNonce, remoteRecNonce, remoteRandomPrivKey, remoteInitRandomPubKey) |
||||
if err != nil { |
||||
t.Errorf("newSession error: %v", err) |
||||
} |
||||
|
||||
// fmt.Printf("\nauth (%v) %x\n\nresp (%v) %x\n\n", len(auth), auth, len(response), response)
|
||||
|
||||
// fmt.Printf("\nauth %x\ninitNonce %x\nresponse%x\nremoteRecNonce %x\nremoteInitNonce %x\nremoteRandomPubKey %x\nrecNonce %x\nremoteInitRandomPubKey %x\ninitSessionToken %x\n\n", auth, initNonce, response, remoteRecNonce, remoteInitNonce, remoteRandomPubKey, recNonce, remoteInitRandomPubKey, initSessionToken)
|
||||
|
||||
if !bytes.Equal(initNonce, remoteInitNonce) { |
||||
t.Errorf("nonces do not match") |
||||
} |
||||
if !bytes.Equal(recNonce, remoteRecNonce) { |
||||
t.Errorf("receiver nonces do not match") |
||||
} |
||||
if !bytes.Equal(initSessionToken, recSessionToken) { |
||||
t.Errorf("session tokens do not match") |
||||
} |
||||
} |
||||
|
||||
// TestHandshake runs the full encrypted handshake over an in-memory
// net.Pipe, with the outbound and inbound sides in separate
// goroutines, and checks that both sides agree on the session token
// and that the inbound side recovers the initiator's public key.
func TestHandshake(t *testing.T) {
	defer testlog(t).detach()

	prv0, _ := crypto.GenerateKey()
	prv1, _ := crypto.GenerateKey()
	pub0s, _ := exportPublicKey(&prv0.PublicKey)
	pub1s, _ := exportPublicKey(&prv1.PublicKey)
	rw0, rw1 := net.Pipe()
	tokens := make(chan []byte)

	// note: only t.Errorf (not Fatalf) is used inside the goroutines;
	// both always send on tokens so the receive below cannot block.
	go func() {
		token, err := outboundEncHandshake(rw0, prv0, pub1s, nil)
		if err != nil {
			t.Errorf("outbound side error: %v", err)
		}
		tokens <- token
	}()
	go func() {
		token, remotePubkey, err := inboundEncHandshake(rw1, prv1, nil)
		if err != nil {
			t.Errorf("inbound side error: %v", err)
		}
		if !bytes.Equal(remotePubkey, pub0s) {
			t.Errorf("inbound side returned wrong remote pubkey\n  got:  %x\n  want: %x", remotePubkey, pub0s)
		}
		tokens <- token
	}()

	t1, t2 := <-tokens, <-tokens
	if !bytes.Equal(t1, t2) {
		t.Error("session token mismatch")
	}
}
@ -0,0 +1,291 @@ |
||||
package discover |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"crypto/elliptic" |
||||
"encoding/hex" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"math/rand" |
||||
"net" |
||||
"net/url" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// nodeIDBits is the size of a NodeID in bits: a marshaled 512-bit
// elliptic-curve public key (see PubkeyID).
const nodeIDBits = 512
||||
|
||||
// Node represents a host on the network.
type Node struct {
	ID NodeID // public key of the node
	IP net.IP // network address

	DiscPort int // UDP listening port for discovery protocol
	TCPPort  int // TCP listening port for RLPx

	// active records when the node was last seen (set to time.Now()
	// when the node is created; unexported, maintained by the table).
	active time.Time
}
||||
|
||||
func newNode(id NodeID, addr *net.UDPAddr) *Node { |
||||
return &Node{ |
||||
ID: id, |
||||
IP: addr.IP, |
||||
DiscPort: addr.Port, |
||||
TCPPort: addr.Port, |
||||
active: time.Now(), |
||||
} |
||||
} |
||||
|
||||
func (n *Node) isValid() bool { |
||||
// TODO: don't accept localhost, LAN addresses from internet hosts
|
||||
return !n.IP.IsMulticast() && !n.IP.IsUnspecified() && n.TCPPort != 0 && n.DiscPort != 0 |
||||
} |
||||
|
||||
// The string representation of a Node is a URL.
|
||||
// Please see ParseNode for a description of the format.
|
||||
func (n *Node) String() string { |
||||
addr := net.TCPAddr{IP: n.IP, Port: n.TCPPort} |
||||
u := url.URL{ |
||||
Scheme: "enode", |
||||
User: url.User(fmt.Sprintf("%x", n.ID[:])), |
||||
Host: addr.String(), |
||||
} |
||||
if n.DiscPort != n.TCPPort { |
||||
u.RawQuery = "discport=" + strconv.Itoa(n.DiscPort) |
||||
} |
||||
return u.String() |
||||
} |
||||
|
||||
// ParseNode parses a node URL.
|
||||
//
|
||||
// A node URL has scheme "enode".
|
||||
//
|
||||
// The hexadecimal node ID is encoded in the username portion of the
|
||||
// URL, separated from the host by an @ sign. The hostname can only be
|
||||
// given as an IP address, DNS domain names are not allowed. The port
|
||||
// in the host name section is the TCP listening port. If the TCP and
|
||||
// UDP (discovery) ports differ, the UDP port is specified as query
|
||||
// parameter "discport".
|
||||
//
|
||||
// In the following example, the node URL describes
|
||||
// a node with IP address 10.3.58.6, TCP listening port 30303
|
||||
// and UDP discovery port 30301.
|
||||
//
|
||||
// enode://<hex node id>@10.3.58.6:30303?discport=30301
|
||||
func ParseNode(rawurl string) (*Node, error) { |
||||
var n Node |
||||
u, err := url.Parse(rawurl) |
||||
if u.Scheme != "enode" { |
||||
return nil, errors.New("invalid URL scheme, want \"enode\"") |
||||
} |
||||
if u.User == nil { |
||||
return nil, errors.New("does not contain node ID") |
||||
} |
||||
if n.ID, err = HexID(u.User.String()); err != nil { |
||||
return nil, fmt.Errorf("invalid node ID (%v)", err) |
||||
} |
||||
ip, port, err := net.SplitHostPort(u.Host) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("invalid host: %v", err) |
||||
} |
||||
if n.IP = net.ParseIP(ip); n.IP == nil { |
||||
return nil, errors.New("invalid IP address") |
||||
} |
||||
if n.TCPPort, err = strconv.Atoi(port); err != nil { |
||||
return nil, errors.New("invalid port") |
||||
} |
||||
qv := u.Query() |
||||
if qv.Get("discport") == "" { |
||||
n.DiscPort = n.TCPPort |
||||
} else { |
||||
if n.DiscPort, err = strconv.Atoi(qv.Get("discport")); err != nil { |
||||
return nil, errors.New("invalid discport in query") |
||||
} |
||||
} |
||||
return &n, nil |
||||
} |
||||
|
||||
// MustParseNode parses a node URL. It panics if the URL is not valid.
|
||||
func MustParseNode(rawurl string) *Node { |
||||
n, err := ParseNode(rawurl) |
||||
if err != nil { |
||||
panic("invalid node URL: " + err.Error()) |
||||
} |
||||
return n |
||||
} |
||||
|
||||
// EncodeRLP implements rlp.Encoder, encoding the node in the external
// rpcNode form (string IP, TCP port, ID).
func (n Node) EncodeRLP(w io.Writer) error {
	return rlp.Encode(w, rpcNode{IP: n.IP.String(), Port: uint16(n.TCPPort), ID: n.ID})
}
||||
func (n *Node) DecodeRLP(s *rlp.Stream) (err error) { |
||||
var ext rpcNode |
||||
if err = s.Decode(&ext); err == nil { |
||||
n.TCPPort = int(ext.Port) |
||||
n.DiscPort = int(ext.Port) |
||||
n.ID = ext.ID |
||||
if n.IP = net.ParseIP(ext.IP); n.IP == nil { |
||||
return errors.New("invalid IP string") |
||||
} |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// NodeID is a unique identifier for each node.
// The node identifier is a marshaled elliptic curve public key
// (nodeIDBits/8 = 64 bytes).
type NodeID [nodeIDBits / 8]byte
||||
|
||||
// NodeID prints as a long hexadecimal number.
|
||||
func (n NodeID) String() string { |
||||
return fmt.Sprintf("%#x", n[:]) |
||||
} |
||||
|
||||
// The Go syntax representation of a NodeID is a call to HexID.
|
||||
func (n NodeID) GoString() string { |
||||
return fmt.Sprintf("discover.HexID(\"%#x\")", n[:]) |
||||
} |
||||
|
||||
// HexID converts a hex string to a NodeID.
|
||||
// The string may be prefixed with 0x.
|
||||
func HexID(in string) (NodeID, error) { |
||||
if strings.HasPrefix(in, "0x") { |
||||
in = in[2:] |
||||
} |
||||
var id NodeID |
||||
b, err := hex.DecodeString(in) |
||||
if err != nil { |
||||
return id, err |
||||
} else if len(b) != len(id) { |
||||
return id, fmt.Errorf("wrong length, need %d hex bytes", len(id)) |
||||
} |
||||
copy(id[:], b) |
||||
return id, nil |
||||
} |
||||
|
||||
// MustHexID converts a hex string to a NodeID.
|
||||
// It panics if the string is not a valid NodeID.
|
||||
func MustHexID(in string) NodeID { |
||||
id, err := HexID(in) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
return id |
||||
} |
||||
|
||||
// PubkeyID returns a marshaled representation of the given public key.
|
||||
func PubkeyID(pub *ecdsa.PublicKey) NodeID { |
||||
var id NodeID |
||||
pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y) |
||||
if len(pbytes)-1 != len(id) { |
||||
panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes))) |
||||
} |
||||
copy(id[:], pbytes[1:]) |
||||
return id |
||||
} |
||||
|
||||
// recoverNodeID computes the public key used to sign the
|
||||
// given hash from the signature.
|
||||
func recoverNodeID(hash, sig []byte) (id NodeID, err error) { |
||||
pubkey, err := secp256k1.RecoverPubkey(hash, sig) |
||||
if err != nil { |
||||
return id, err |
||||
} |
||||
if len(pubkey)-1 != len(id) { |
||||
return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8) |
||||
} |
||||
for i := range id { |
||||
id[i] = pubkey[i+1] |
||||
} |
||||
return id, nil |
||||
} |
||||
|
||||
// distcmp compares the distances a->target and b->target.
|
||||
// Returns -1 if a is closer to target, 1 if b is closer to target
|
||||
// and 0 if they are equal.
|
||||
func distcmp(target, a, b NodeID) int { |
||||
for i := range target { |
||||
da := a[i] ^ target[i] |
||||
db := b[i] ^ target[i] |
||||
if da > db { |
||||
return 1 |
||||
} else if da < db { |
||||
return -1 |
||||
} |
||||
} |
||||
return 0 |
||||
} |
||||
|
||||
// lzcount is a lookup table of leading-zero-bit counts for every byte
// value [0..255]; lzcount[b] is the number of leading zero bits in b.
// Used by logdist to avoid per-bit loops.
var lzcount = [256]int{
	8, 7, 6, 6, 5, 5, 5, 5,
	4, 4, 4, 4, 4, 4, 4, 4,
	3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
}
||||
|
||||
// logdist returns the logarithmic distance between a and b, log2(a ^ b).
|
||||
func logdist(a, b NodeID) int { |
||||
lz := 0 |
||||
for i := range a { |
||||
x := a[i] ^ b[i] |
||||
if x == 0 { |
||||
lz += 8 |
||||
} else { |
||||
lz += lzcount[x] |
||||
break |
||||
} |
||||
} |
||||
return len(a)*8 - lz |
||||
} |
||||
|
||||
// randomID returns a random NodeID such that logdist(a, b) == n
|
||||
func randomID(a NodeID, n int) (b NodeID) { |
||||
if n == 0 { |
||||
return a |
||||
} |
||||
// flip bit at position n, fill the rest with random bits
|
||||
b = a |
||||
pos := len(a) - n/8 - 1 |
||||
bit := byte(0x01) << (byte(n%8) - 1) |
||||
if bit == 0 { |
||||
pos++ |
||||
bit = 0x80 |
||||
} |
||||
b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits
|
||||
for i := pos + 1; i < len(a); i++ { |
||||
b[i] = byte(rand.Intn(255)) |
||||
} |
||||
return b |
||||
} |
@ -0,0 +1,201 @@ |
||||
package discover |
||||
|
||||
import ( |
||||
"math/big" |
||||
"math/rand" |
||||
"net" |
||||
"reflect" |
||||
"testing" |
||||
"testing/quick" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
// Shared configuration for testing/quick property tests: a seeded
// source so failures are reproducible within a run, and a 5000-case
// budget per property.
var (
	quickrand = rand.New(rand.NewSource(time.Now().Unix()))
	quickcfg  = &quick.Config{MaxCount: 5000, Rand: quickrand}
)
||||
|
||||
// parseNodeTests is the shared table for TestParseNode and
// TestNodeString: each entry is either an error case (wantError set)
// or a round-trippable URL (wantResult set).
var parseNodeTests = []struct {
	rawurl     string
	wantError  string
	wantResult *Node
}{
	{
		rawurl:    "http://foobar",
		wantError: `invalid URL scheme, want "enode"`,
	},
	{
		rawurl:    "enode://foobar",
		wantError: `does not contain node ID`,
	},
	{
		rawurl:    "enode://01010101@123.124.125.126:3",
		wantError: `invalid node ID (wrong length, need 64 hex bytes)`,
	},
	{
		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@hostname:3",
		wantError: `invalid IP address`,
	},
	{
		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:foo",
		wantError: `invalid port`,
	},
	{
		rawurl:    "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:3?discport=foo",
		wantError: `invalid discport in query`,
	},
	{
		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150",
		wantResult: &Node{
			ID:       MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
			IP:       net.ParseIP("127.0.0.1"),
			DiscPort: 52150,
			TCPPort:  52150,
		},
	},
	{
		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[::]:52150",
		wantResult: &Node{
			ID:       MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
			IP:       net.ParseIP("::"),
			DiscPort: 52150,
			TCPPort:  52150,
		},
	},
	{
		rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150?discport=223344",
		wantResult: &Node{
			ID:       MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"),
			IP:       net.ParseIP("127.0.0.1"),
			DiscPort: 223344,
			TCPPort:  52150,
		},
	},
}
||||
|
||||
// TestParseNode runs ParseNode over the shared table, checking both
// the error message and the parsed result.
func TestParseNode(t *testing.T) {
	for i, test := range parseNodeTests {
		n, err := ParseNode(test.rawurl)
		if err == nil && test.wantError != "" {
			t.Errorf("test %d: got nil error, expected %#q", i, test.wantError)
			continue
		}
		if err != nil && err.Error() != test.wantError {
			t.Errorf("test %d: got error %#q, expected %#q", i, err.Error(), test.wantError)
			continue
		}
		// for error cases both n and wantResult are nil, so this
		// comparison also covers them
		if !reflect.DeepEqual(n, test.wantResult) {
			t.Errorf("test %d: result mismatch:\ngot:  %#v, want: %#v", i, n, test.wantResult)
		}
	}
}
||||
|
||||
func TestNodeString(t *testing.T) { |
||||
for i, test := range parseNodeTests { |
||||
if test.wantError != "" { |
||||
continue |
||||
} |
||||
str := test.wantResult.String() |
||||
if str != test.rawurl { |
||||
t.Errorf("test %d: Node.String() mismatch:\ngot: %s\nwant: %s", i, str, test.rawurl) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// TestHexID checks that HexID decodes both the 0x-prefixed and the
// bare hex form to the expected byte array.
func TestHexID(t *testing.T) {
	ref := NodeID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188}
	id1 := MustHexID("0x000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")
	id2 := MustHexID("000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc")

	if id1 != ref {
		t.Errorf("wrong id1\ngot  %v\nwant %v", id1[:], ref[:])
	}
	if id2 != ref {
		t.Errorf("wrong id2\ngot  %v\nwant %v", id2[:], ref[:])
	}
}
||||
|
||||
// TestNodeID_recover signs a zero hash and checks that recoverNodeID
// returns the signer's PubkeyID.
func TestNodeID_recover(t *testing.T) {
	prv := newkey()
	hash := make([]byte, 32)
	sig, err := crypto.Sign(hash, prv)
	if err != nil {
		t.Fatalf("signing error: %v", err)
	}

	pub := PubkeyID(&prv.PublicKey)
	recpub, err := recoverNodeID(hash, sig)
	if err != nil {
		t.Fatalf("recovery error: %v", err)
	}
	if pub != recpub {
		t.Errorf("recovered wrong pubkey:\ngot:  %v\nwant: %v", recpub, pub)
	}
}
||||
|
||||
// TestNodeID_distcmp cross-checks distcmp against a big.Int reference
// implementation using testing/quick.
func TestNodeID_distcmp(t *testing.T) {
	distcmpBig := func(target, a, b NodeID) int {
		tbig := new(big.Int).SetBytes(target[:])
		abig := new(big.Int).SetBytes(a[:])
		bbig := new(big.Int).SetBytes(b[:])
		return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig))
	}
	if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg); err != nil {
		t.Error(err)
	}
}
||||
|
||||
// The random test above is likely to miss the case where the two
// compared IDs are equal, so cover it explicitly here.
func TestNodeID_distcmpEqual(t *testing.T) {
	base := NodeID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	x := NodeID{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}
	if distcmp(base, x, x) != 0 {
		t.Errorf("distcmp(base, x, x) != 0")
	}
}
||||
|
||||
// TestNodeID_logdist cross-checks logdist against big.Int.BitLen of
// the XOR, using testing/quick.
func TestNodeID_logdist(t *testing.T) {
	logdistBig := func(a, b NodeID) int {
		abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:])
		return new(big.Int).Xor(abig, bbig).BitLen()
	}
	if err := quick.CheckEqual(logdist, logdistBig, quickcfg); err != nil {
		t.Error(err)
	}
}
||||
|
||||
// The random test above is likely to miss the case where the two IDs
// are equal, so cover it explicitly here.
func TestNodeID_logdistEqual(t *testing.T) {
	x := NodeID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
	if logdist(x, x) != 0 {
		t.Errorf("logdist(x, x) != 0")
	}
}
||||
|
||||
// TestNodeID_randomID checks that randomID produces IDs at exactly
// the requested logarithmic distance.
func TestNodeID_randomID(t *testing.T) {
	// we don't use quick.Check here because its output isn't
	// very helpful when the test fails.
	for i := 0; i < quickcfg.MaxCount; i++ {
		a := gen(NodeID{}, quickrand).(NodeID)
		dist := quickrand.Intn(len(NodeID{}) * 8)
		result := randomID(a, dist)
		actualdist := logdist(result, a)

		if dist != actualdist {
			t.Log("a:      ", a)
			t.Log("result:", result)
			t.Fatalf("#%d: distance of result is %d, want %d", i, actualdist, dist)
		}
	}
}
||||
|
||||
// Generate implements quick.Generator, producing a NodeID with a
// random-length run of leading zero bytes followed by random bytes.
func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value {
	var id NodeID
	m := rand.Intn(len(id))
	for i := len(id) - 1; i > m; i-- {
		id[i] = byte(rand.Uint32())
	}
	return reflect.ValueOf(id)
}
@ -0,0 +1,280 @@ |
||||
// Package discover implements the Node Discovery Protocol.
|
||||
//
|
||||
// The Node Discovery protocol provides a way to find RLPx nodes that
|
||||
// can be connected to. It uses a Kademlia-like protocol to maintain a
|
||||
// distributed database of the IDs and endpoints of all listening
|
||||
// nodes.
|
||||
package discover |
||||
|
||||
import ( |
||||
"net" |
||||
"sort" |
||||
"sync" |
||||
"time" |
||||
) |
||||
|
||||
const (
	alpha      = 3              // Kademlia concurrency factor
	bucketSize = 16             // Kademlia bucket size
	nBuckets   = nodeIDBits + 1 // Number of buckets
)

// Table is the Kademlia-style node table. Known nodes are kept in
// buckets indexed by their log distance to the local node.
type Table struct {
	mutex   sync.Mutex        // protects buckets, their content, and nursery
	buckets [nBuckets]*bucket // index of known nodes by distance
	nursery []*Node           // bootstrap nodes

	net  transport // UDP transport used for ping/findnode RPCs
	self *Node     // metadata of the local node
}
||||
|
||||
// transport is implemented by the UDP transport.
// it is an interface so we can test without opening lots of UDP
// sockets and without generating a private key.
type transport interface {
	// ping checks whether the node is alive.
	ping(*Node) error
	// findnode asks e for nodes close to target.
	findnode(e *Node, target NodeID) ([]*Node, error)
	// close shuts the transport down.
	close()
}

// bucket contains nodes, ordered by their last activity.
// The entry at the front (index 0) is the most recently active one.
type bucket struct {
	lastLookup time.Time // when this bucket was last the target of a lookup
	entries    []*Node
}
||||
|
||||
func newTable(t transport, ourID NodeID, ourAddr *net.UDPAddr) *Table { |
||||
tab := &Table{net: t, self: newNode(ourID, ourAddr)} |
||||
for i := range tab.buckets { |
||||
tab.buckets[i] = new(bucket) |
||||
} |
||||
return tab |
||||
} |
||||
|
||||
// Self returns the local node ID.
func (tab *Table) Self() NodeID {
	return tab.self.ID
}

// Close terminates the network listener.
func (tab *Table) Close() {
	tab.net.close()
}
||||
|
||||
// Bootstrap sets the bootstrap nodes. These nodes are used to connect
|
||||
// to the network if the table is empty. Bootstrap will also attempt to
|
||||
// fill the table by performing random lookup operations on the
|
||||
// network.
|
||||
func (tab *Table) Bootstrap(nodes []*Node) { |
||||
tab.mutex.Lock() |
||||
// TODO: maybe filter nodes with bad fields (nil, etc.) to avoid strange crashes
|
||||
tab.nursery = make([]*Node, 0, len(nodes)) |
||||
for _, n := range nodes { |
||||
cpy := *n |
||||
tab.nursery = append(tab.nursery, &cpy) |
||||
} |
||||
tab.mutex.Unlock() |
||||
tab.refresh() |
||||
} |
||||
|
||||
// Lookup performs a network search for nodes close
// to the given target. It approaches the target by querying
// nodes that are closer to it on each iteration.
// It returns up to bucketSize nodes sorted by distance to target.
func (tab *Table) Lookup(target NodeID) []*Node {
	var (
		asked          = make(map[NodeID]bool) // nodes we already sent findnode to
		seen           = make(map[NodeID]bool) // nodes that appeared in a reply
		reply          = make(chan []*Node, alpha)
		pendingQueries = 0 // findnode requests currently in flight
	)
	// don't query further if we hit the target or ourself.
	// unlikely to happen often in practice.
	asked[target] = true
	asked[tab.self.ID] = true

	tab.mutex.Lock()
	// update last lookup stamp (for refresh logic)
	tab.buckets[logdist(tab.self.ID, target)].lastLookup = time.Now()
	// generate initial result set
	result := tab.closest(target, bucketSize)
	tab.mutex.Unlock()

	for {
		// ask the alpha closest nodes that we haven't asked yet
		for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ {
			n := result.entries[i]
			if !asked[n.ID] {
				asked[n.ID] = true
				pendingQueries++
				go func() {
					// findnode errors are deliberately ignored; a failed
					// query simply contributes an empty reply.
					result, _ := tab.net.findnode(n, target)
					reply <- result
				}()
			}
		}
		if pendingQueries == 0 {
			// we have asked all closest nodes, stop the search
			break
		}

		// wait for the next reply, merging unseen nodes into the
		// (distance-sorted) result set.
		for _, n := range <-reply {
			cn := n
			if !seen[n.ID] {
				seen[n.ID] = true
				result.push(cn, bucketSize)
			}
		}
		pendingQueries--
	}
	return result.entries
}
||||
|
||||
// refresh performs a lookup for a random target to keep buckets full.
func (tab *Table) refresh() {
	// Choose the first bucket (skipping index 0, our own distance)
	// that has not been the target of a lookup for over an hour.
	// NOTE(review): if every bucket had a recent lookup, ld stays -1
	// and randomID is called with a negative distance — confirm
	// randomID tolerates that.
	ld := -1 // logdist of chosen bucket
	tab.mutex.Lock()
	for i, b := range tab.buckets {
		if i > 0 && b.lastLookup.Before(time.Now().Add(-1*time.Hour)) {
			ld = i
			break
		}
	}
	tab.mutex.Unlock()

	result := tab.Lookup(randomID(tab.self.ID, ld))
	if len(result) == 0 {
		// The lookup found nothing, so the table is likely empty:
		// seed it with the bootstrap nodes and retry.
		// bootstrap the table with a self lookup
		tab.mutex.Lock()
		tab.add(tab.nursery)
		tab.mutex.Unlock()
		tab.Lookup(tab.self.ID)
		// TODO: the Kademlia paper says that we're supposed to perform
		// random lookups in all buckets further away than our closest neighbor.
	}
}
||||
|
||||
// closest returns the n nodes in the table that are closest to the
|
||||
// given id. The caller must hold tab.mutex.
|
||||
func (tab *Table) closest(target NodeID, nresults int) *nodesByDistance { |
||||
// This is a very wasteful way to find the closest nodes but
|
||||
// obviously correct. I believe that tree-based buckets would make
|
||||
// this easier to implement efficiently.
|
||||
close := &nodesByDistance{target: target} |
||||
for _, b := range tab.buckets { |
||||
for _, n := range b.entries { |
||||
close.push(n, nresults) |
||||
} |
||||
} |
||||
return close |
||||
} |
||||
|
||||
func (tab *Table) len() (n int) { |
||||
for _, b := range tab.buckets { |
||||
n += len(b.entries) |
||||
} |
||||
return n |
||||
} |
||||
|
||||
// bumpOrAdd updates the activity timestamp for the given node and
// attempts to insert the node into a bucket. The returned Node might
// not be part of the table. The caller must hold tab.mutex.
func (tab *Table) bumpOrAdd(node NodeID, from *net.UDPAddr) (n *Node) {
	b := tab.buckets[logdist(tab.self.ID, node)]
	if n = b.bump(node); n == nil {
		// Not in the bucket yet: add it, or — if the bucket is
		// full — ping the least recently active entry and let it be
		// replaced asynchronously if it does not answer.
		n = newNode(node, from)
		if len(b.entries) == bucketSize {
			tab.pingReplace(n, b)
		} else {
			b.entries = append(b.entries, n)
		}
	}
	return n
}
||||
|
||||
// pingReplace pings the last (least recently active) entry of b and,
// if it does not respond, replaces it with n at the front of the
// bucket. The ping and replacement happen asynchronously; the caller
// must hold tab.mutex for the initial read of b.entries.
func (tab *Table) pingReplace(n *Node, b *bucket) {
	old := b.entries[bucketSize-1]
	go func() {
		if err := tab.net.ping(old); err == nil {
			// it responded, we don't need to replace it.
			return
		}
		// it didn't respond, replace the node if it is still the oldest node.
		tab.mutex.Lock()
		if len(b.entries) > 0 && b.entries[len(b.entries)-1] == old {
			// slide down other entries and put the new one in front.
			// The former last entry falls off the end.
			// TODO: insert in correct position to keep the order
			copy(b.entries[1:], b.entries)
			b.entries[0] = n
		}
		tab.mutex.Unlock()
	}()
}
||||
|
||||
// bump updates the activity timestamp for the given node.
// The caller must hold tab.mutex.
func (tab *Table) bump(node NodeID) {
	tab.buckets[logdist(tab.self.ID, node)].bump(node)
}
||||
|
||||
// add puts the entries into the table if their corresponding
|
||||
// bucket is not full. The caller must hold tab.mutex.
|
||||
func (tab *Table) add(entries []*Node) { |
||||
outer: |
||||
for _, n := range entries { |
||||
if n == nil || n.ID == tab.self.ID { |
||||
// skip bad entries. The RLP decoder returns nil for empty
|
||||
// input lists.
|
||||
continue |
||||
} |
||||
bucket := tab.buckets[logdist(tab.self.ID, n.ID)] |
||||
for i := range bucket.entries { |
||||
if bucket.entries[i].ID == n.ID { |
||||
// already in bucket
|
||||
continue outer |
||||
} |
||||
} |
||||
if len(bucket.entries) < bucketSize { |
||||
bucket.entries = append(bucket.entries, n) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (b *bucket) bump(id NodeID) *Node { |
||||
for i, n := range b.entries { |
||||
if n.ID == id { |
||||
n.active = time.Now() |
||||
// move it to the front
|
||||
copy(b.entries[1:], b.entries[:i+1]) |
||||
b.entries[0] = n |
||||
return n |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// nodesByDistance is a list of nodes, ordered by
// distance to target (closest first).
type nodesByDistance struct {
	entries []*Node
	target  NodeID
}
||||
|
||||
// push adds the given node to the list, keeping the total size below maxElems.
// When the list is full and n is farther from target than every current
// entry, n is dropped; when it is closer, the farthest entry falls off.
func (h *nodesByDistance) push(n *Node, maxElems int) {
	// ix is the sorted insertion position for n.
	ix := sort.Search(len(h.entries), func(i int) bool {
		return distcmp(h.target, h.entries[i].ID, n.ID) > 0
	})
	if len(h.entries) < maxElems {
		// grow the list; the appended slot is overwritten below
		// unless n belongs at the very end.
		h.entries = append(h.entries, n)
	}
	if ix == len(h.entries) {
		// farther away than all nodes we already have.
		// if there was room for it, the node is now the last element.
	} else {
		// slide existing entries down to make room
		// this will overwrite the entry we just appended.
		copy(h.entries[ix+1:], h.entries[ix:])
		h.entries[ix] = n
	}
}
@ -0,0 +1,311 @@ |
||||
package discover |
||||
|
||||
import ( |
||||
"crypto/ecdsa" |
||||
"errors" |
||||
"fmt" |
||||
"math/rand" |
||||
"net" |
||||
"reflect" |
||||
"testing" |
||||
"testing/quick" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
// TestTable_bumpOrAddBucketAssign checks that bumpOrAdd places each
// node into the bucket matching its log distance to the local node.
func TestTable_bumpOrAddBucketAssign(t *testing.T) {
	tab := newTable(nil, NodeID{}, &net.UDPAddr{})
	for i := 1; i < len(tab.buckets); i++ {
		tab.bumpOrAdd(randomID(tab.self.ID, i), &net.UDPAddr{})
	}
	for i, b := range tab.buckets {
		if i > 0 && len(b.entries) != 1 {
			t.Errorf("bucket %d has %d entries, want 1", i, len(b.entries))
		}
	}
}
||||
|
||||
func TestTable_bumpOrAddPingReplace(t *testing.T) { |
||||
pingC := make(pingC) |
||||
tab := newTable(pingC, NodeID{}, &net.UDPAddr{}) |
||||
last := fillBucket(tab, 200) |
||||
|
||||
// this bumpOrAdd should not replace the last node
|
||||
// because the node replies to ping.
|
||||
new := tab.bumpOrAdd(randomID(tab.self.ID, 200), &net.UDPAddr{}) |
||||
|
||||
pinged := <-pingC |
||||
if pinged != last.ID { |
||||
t.Fatalf("pinged wrong node: %v\nwant %v", pinged, last.ID) |
||||
} |
||||
|
||||
tab.mutex.Lock() |
||||
defer tab.mutex.Unlock() |
||||
if l := len(tab.buckets[200].entries); l != bucketSize { |
||||
t.Errorf("wrong bucket size after bumpOrAdd: got %d, want %d", bucketSize, l) |
||||
} |
||||
if !contains(tab.buckets[200].entries, last.ID) { |
||||
t.Error("last entry was removed") |
||||
} |
||||
if contains(tab.buckets[200].entries, new.ID) { |
||||
t.Error("new entry was added") |
||||
} |
||||
} |
||||
|
||||
func TestTable_bumpOrAddPingTimeout(t *testing.T) { |
||||
tab := newTable(pingC(nil), NodeID{}, &net.UDPAddr{}) |
||||
last := fillBucket(tab, 200) |
||||
|
||||
// this bumpOrAdd should replace the last node
|
||||
// because the node does not reply to ping.
|
||||
new := tab.bumpOrAdd(randomID(tab.self.ID, 200), &net.UDPAddr{}) |
||||
|
||||
// wait for async bucket update. damn. this needs to go away.
|
||||
time.Sleep(2 * time.Millisecond) |
||||
|
||||
tab.mutex.Lock() |
||||
defer tab.mutex.Unlock() |
||||
if l := len(tab.buckets[200].entries); l != bucketSize { |
||||
t.Errorf("wrong bucket size after bumpOrAdd: got %d, want %d", bucketSize, l) |
||||
} |
||||
if contains(tab.buckets[200].entries, last.ID) { |
||||
t.Error("last entry was not removed") |
||||
} |
||||
if !contains(tab.buckets[200].entries, new.ID) { |
||||
t.Error("new entry was not added") |
||||
} |
||||
} |
||||
|
||||
func fillBucket(tab *Table, ld int) (last *Node) { |
||||
b := tab.buckets[ld] |
||||
for len(b.entries) < bucketSize { |
||||
b.entries = append(b.entries, &Node{ID: randomID(tab.self.ID, ld)}) |
||||
} |
||||
return b.entries[bucketSize-1] |
||||
} |
||||
|
||||
// pingC is a test transport that records pinged node IDs on itself.
// A nil pingC makes every ping time out; findnode and close are not
// supported and panic when called.
type pingC chan NodeID

func (t pingC) findnode(n *Node, target NodeID) ([]*Node, error) {
	panic("findnode called on pingRecorder")
}
func (t pingC) close() {
	panic("close called on pingRecorder")
}
func (t pingC) ping(n *Node) error {
	if t == nil {
		return errTimeout
	}
	t <- n.ID
	return nil
}
||||
|
||||
// TestTable_bump checks that bumping an entry refreshes its activity
// timestamp and moves it to the front of its bucket.
func TestTable_bump(t *testing.T) {
	tab := newTable(nil, NodeID{}, &net.UDPAddr{})

	// add an old entry and two recent ones
	oldactive := time.Now().Add(-2 * time.Minute)
	old := &Node{ID: randomID(tab.self.ID, 200), active: oldactive}
	others := []*Node{
		&Node{ID: randomID(tab.self.ID, 200), active: time.Now()},
		&Node{ID: randomID(tab.self.ID, 200), active: time.Now()},
	}
	tab.add(append(others, old))
	if tab.buckets[200].entries[0] == old {
		t.Fatal("old entry is at front of bucket")
	}

	// bumping the old entry should move it to the front
	tab.bump(old.ID)
	if old.active == oldactive {
		t.Error("activity timestamp not updated")
	}
	if tab.buckets[200].entries[0] != old {
		t.Errorf("bumped entry did not move to the front of bucket")
	}
}
||||
|
||||
// TestTable_closest property-tests Table.closest: the result must be
// duplicate-free, sorted by distance to the target, of size
// min(N, table length), and must contain no node farther from the
// target than some node left out of the result.
func TestTable_closest(t *testing.T) {
	t.Parallel()

	test := func(test *closeTest) bool {
		// for any node table, Target and N
		tab := newTable(nil, test.Self, &net.UDPAddr{})
		tab.add(test.All)

		// check that doClosest(Target, N) returns nodes
		result := tab.closest(test.Target, test.N).entries
		if hasDuplicates(result) {
			t.Errorf("result contains duplicates")
			return false
		}
		if !sortedByDistanceTo(test.Target, result) {
			t.Errorf("result is not sorted by distance to target")
			return false
		}

		// check that the number of results is min(N, tablen)
		wantN := test.N
		if tlen := tab.len(); tlen < test.N {
			wantN = tlen
		}
		if len(result) != wantN {
			t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN)
			return false
		} else if len(result) == 0 {
			return true // no need to check distance
		}

		// check that the result nodes have minimum distance to target.
		for _, b := range tab.buckets {
			for _, n := range b.entries {
				if contains(result, n.ID) {
					continue // don't run the check below for nodes in result
				}
				farthestResult := result[len(result)-1].ID
				if distcmp(test.Target, n.ID, farthestResult) < 0 {
					t.Errorf("table contains node that is closer to target but it's not in result")
					t.Logf("  Target: %v", test.Target)
					t.Logf("  Farthest Result: %v", farthestResult)
					t.Logf("  ID: %v", n.ID)
					return false
				}
			}
		}
		return true
	}
	if err := quick.Check(test, quickcfg); err != nil {
		t.Error(err)
	}
}
||||
|
||||
// closeTest is the randomized input for TestTable_closest.
type closeTest struct {
	Self   NodeID  // local node identity
	Target NodeID  // lookup target
	All    []*Node // table contents
	N      int     // requested result count
}

// Generate implements testing/quick.Generator for *closeTest.
func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value {
	t := &closeTest{
		Self:   gen(NodeID{}, rand).(NodeID),
		Target: gen(NodeID{}, rand).(NodeID),
		N:      rand.Intn(bucketSize),
	}
	for _, id := range gen([]NodeID{}, rand).([]NodeID) {
		t.All = append(t.All, &Node{ID: id})
	}
	return reflect.ValueOf(t)
}
||||
|
||||
// TestTable_Lookup runs a lookup against the findnodeOracle transport
// and checks the result set converges on the target.
func TestTable_Lookup(t *testing.T) {
	self := gen(NodeID{}, quickrand).(NodeID)
	target := randomID(self, 200)
	transport := findnodeOracle{t, target}
	tab := newTable(transport, self, &net.UDPAddr{})

	// lookup on empty table returns no nodes
	if results := tab.Lookup(target); len(results) > 0 {
		t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results)
	}
	// seed table with initial node (otherwise lookup will terminate immediately)
	tab.bumpOrAdd(randomID(target, 200), &net.UDPAddr{Port: 200})

	results := tab.Lookup(target)
	t.Logf("results:")
	for _, e := range results {
		t.Logf("  ld=%d, %v", logdist(target, e.ID), e.ID)
	}
	if len(results) != bucketSize {
		t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize)
	}
	if hasDuplicates(results) {
		t.Errorf("result set contains duplicate entries")
	}
	if !sortedByDistanceTo(target, results) {
		t.Errorf("result set not sorted by distance to target")
	}
	if !contains(results, target) {
		t.Errorf("result set does not contain target")
	}
}
||||
|
||||
// findnode on this transport always returns at least one node
// that is one bucket closer to the target.
type findnodeOracle struct {
	t      *testing.T
	target NodeID
}

// findnode returns bucketSize fake nodes one log distance closer to
// the oracle's target. The queried node's current log distance is
// encoded in its DiscPort.
func (t findnodeOracle) findnode(n *Node, target NodeID) ([]*Node, error) {
	t.t.Logf("findnode query at dist %d", n.DiscPort)
	// current log distance is encoded in port number
	var result []*Node
	switch n.DiscPort {
	case 0:
		panic("query to node at distance 0")
	default:
		// TODO: add more randomness to distances
		next := n.DiscPort - 1
		for i := 0; i < bucketSize; i++ {
			result = append(result, &Node{ID: randomID(t.target, next), DiscPort: next})
		}
	}
	return result, nil
}

func (t findnodeOracle) close() {}

func (t findnodeOracle) ping(n *Node) error {
	return errors.New("ping is not supported by this transport")
}
||||
|
||||
func hasDuplicates(slice []*Node) bool { |
||||
seen := make(map[NodeID]bool) |
||||
for _, e := range slice { |
||||
if seen[e.ID] { |
||||
return true |
||||
} |
||||
seen[e.ID] = true |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func sortedByDistanceTo(distbase NodeID, slice []*Node) bool { |
||||
var last NodeID |
||||
for i, e := range slice { |
||||
if i > 0 && distcmp(distbase, e.ID, last) < 0 { |
||||
return false |
||||
} |
||||
last = e.ID |
||||
} |
||||
return true |
||||
} |
||||
|
||||
func contains(ns []*Node, id NodeID) bool { |
||||
for _, n := range ns { |
||||
if n.ID == id { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// gen wraps quick.Value so it's easier to use.
// it generates a random value of the given value's type.
// It panics if testing/quick cannot generate the type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
	v, ok := quick.Value(reflect.TypeOf(typ), rand)
	if !ok {
		panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
	}
	return v.Interface()
}
||||
|
||||
// newkey generates a fresh ECDSA node key for tests, panicking on failure.
func newkey() *ecdsa.PrivateKey {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("couldn't generate key: " + err.Error())
	}
	return key
}
@ -0,0 +1,431 @@ |
||||
package discover |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/ecdsa" |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/logger" |
||||
"github.com/ethereum/go-ethereum/p2p/nat" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
var log = logger.NewLogger("P2P Discovery")

// Errors
var (
	errPacketTooSmall = errors.New("too small")
	errBadHash        = errors.New("bad hash")
	errExpired        = errors.New("expired")
	errTimeout        = errors.New("RPC timeout")
	errClosed         = errors.New("socket closed")
)

// Timeouts
const (
	respTimeout = 300 * time.Millisecond
	sendTimeout = 300 * time.Millisecond
	expiration  = 20 * time.Second

	refreshInterval = 1 * time.Hour
)

// RPC packet types
const (
	pingPacket = iota + 1 // zero is 'reserved'
	pongPacket
	findnodePacket
	neighborsPacket
)

// RPC request structures
type (
	ping struct {
		IP         string // our IP
		Port       uint16 // our port
		Expiration uint64
	}

	// reply to Ping
	pong struct {
		ReplyTok   []byte // hash of the ping packet being answered
		Expiration uint64
	}

	findnode struct {
		// Id to look up. The responding node will send back nodes
		// closest to the target.
		Target     NodeID
		Expiration uint64
	}

	// reply to findnode
	neighbors struct {
		Nodes      []*Node
		Expiration uint64
	}
)

// rpcNode is the wire representation of a node.
// NOTE(review): rpcNode is not referenced anywhere in this file —
// confirm it is used elsewhere or remove it.
type rpcNode struct {
	IP   string
	Port uint16
	ID   NodeID
}

// udp implements the RPC protocol.
type udp struct {
	conn       *net.UDPConn
	priv       *ecdsa.PrivateKey
	addpending chan *pending // registers reply callbacks with loop
	replies    chan reply    // dispatches incoming replies to loop
	closing    chan struct{} // closed when the transport shuts down
	nat        nat.Interface

	*Table
}

// pending represents a pending reply.
//
// some implementations of the protocol wish to send more than one
// reply packet to findnode. in general, any neighbors packet cannot
// be matched up with a specific findnode packet.
//
// our implementation handles this by storing a callback function for
// each pending reply. incoming packets from a node are dispatched
// to all the callback functions for that node.
type pending struct {
	// these fields must match in the reply.
	from  NodeID
	ptype byte

	// time when the request must complete
	deadline time.Time

	// callback is called when a matching reply arrives. if it returns
	// true, the callback is removed from the pending reply queue.
	// if it returns false, the reply is considered incomplete and
	// the callback will be invoked again for the next matching reply.
	callback func(resp interface{}) (done bool)

	// errc receives nil when the callback indicates completion or an
	// error if no further reply is received within the timeout.
	errc chan<- error
}

// reply is an incoming packet matched against the pending queue.
type reply struct {
	from  NodeID
	ptype byte
	data  interface{}
}
||||
|
||||
// ListenUDP returns a new table that listens for UDP packets on laddr.
// If natm is non-nil, the listening port is mapped on the NAT device
// and the advertised endpoint uses the external IP when available.
func ListenUDP(priv *ecdsa.PrivateKey, laddr string, natm nat.Interface) (*Table, error) {
	addr, err := net.ResolveUDPAddr("udp", laddr)
	if err != nil {
		return nil, err
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}
	udp := &udp{
		conn:       conn,
		priv:       priv,
		closing:    make(chan struct{}),
		addpending: make(chan *pending),
		replies:    make(chan reply),
	}

	realaddr := conn.LocalAddr().(*net.UDPAddr)
	if natm != nil {
		if !realaddr.IP.IsLoopback() {
			// keep the port mapping alive until the transport closes.
			go nat.Map(natm, udp.closing, "udp", realaddr.Port, realaddr.Port, "ethereum discovery")
		}
		// TODO: react to external IP changes over time.
		if ext, err := natm.ExternalIP(); err == nil {
			realaddr = &net.UDPAddr{IP: ext, Port: realaddr.Port}
		}
	}
	udp.Table = newTable(udp, PubkeyID(&priv.PublicKey), realaddr)

	go udp.loop()
	go udp.readLoop()
	log.Infoln("Listening, ", udp.self)
	return udp.Table, nil
}
||||
|
||||
// close shuts down the socket and unblocks loop and readLoop.
func (t *udp) close() {
	close(t.closing)
	t.conn.Close()
	// TODO: wait for the loops to end.
}
||||
|
||||
// ping sends a ping message to the given node and waits for a reply.
func (t *udp) ping(e *Node) error {
	// TODO: maybe check for ReplyTo field in callback to measure RTT
	// Register the pong callback before sending so the reply can't be
	// missed; any pong from e completes the request.
	errc := t.pending(e.ID, pongPacket, func(interface{}) bool { return true })
	t.send(e, pingPacket, ping{
		IP:         t.self.IP.String(),
		Port:       uint16(t.self.TCPPort),
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	})
	return <-errc
}
||||
|
||||
// findnode sends a findnode request to the given node and waits until
// the node has sent up to k neighbors.
func (t *udp) findnode(to *Node, target NodeID) ([]*Node, error) {
	nodes := make([]*Node, 0, bucketSize)
	nreceived := 0
	// Neighbors may arrive split across several packets; keep the
	// callback registered until bucketSize nodes have been counted.
	errc := t.pending(to.ID, neighborsPacket, func(r interface{}) bool {
		reply := r.(*neighbors)
		for _, n := range reply.Nodes {
			nreceived++
			if n.isValid() {
				nodes = append(nodes, n)
			}
		}
		return nreceived >= bucketSize
	})

	t.send(to, findnodePacket, findnode{
		Target:     target,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	})
	err := <-errc
	return nodes, err
}
||||
|
||||
// pending adds a reply callback to the pending reply queue.
// see the documentation of type pending for a detailed explanation.
// The returned channel yields nil on completion, or an error on
// timeout or shutdown.
func (t *udp) pending(id NodeID, ptype byte, callback func(interface{}) bool) <-chan error {
	ch := make(chan error, 1)
	p := &pending{from: id, ptype: ptype, callback: callback, errc: ch}
	select {
	case t.addpending <- p:
		// loop will handle it
	case <-t.closing:
		ch <- errClosed
	}
	return ch
}
||||
|
||||
// loop runs in its own goroutin. it keeps track of
|
||||
// the refresh timer and the pending reply queue.
|
||||
func (t *udp) loop() { |
||||
var ( |
||||
pending []*pending |
||||
nextDeadline time.Time |
||||
timeout = time.NewTimer(0) |
||||
refresh = time.NewTicker(refreshInterval) |
||||
) |
||||
<-timeout.C // ignore first timeout
|
||||
defer refresh.Stop() |
||||
defer timeout.Stop() |
||||
|
||||
rearmTimeout := func() { |
||||
if len(pending) == 0 || nextDeadline == pending[0].deadline { |
||||
return |
||||
} |
||||
nextDeadline = pending[0].deadline |
||||
timeout.Reset(nextDeadline.Sub(time.Now())) |
||||
} |
||||
|
||||
for { |
||||
select { |
||||
case <-refresh.C: |
||||
go t.refresh() |
||||
|
||||
case <-t.closing: |
||||
for _, p := range pending { |
||||
p.errc <- errClosed |
||||
} |
||||
return |
||||
|
||||
case p := <-t.addpending: |
||||
p.deadline = time.Now().Add(respTimeout) |
||||
pending = append(pending, p) |
||||
rearmTimeout() |
||||
|
||||
case reply := <-t.replies: |
||||
// run matching callbacks, remove if they return false.
|
||||
for i, p := range pending { |
||||
if reply.from == p.from && reply.ptype == p.ptype && p.callback(reply.data) { |
||||
p.errc <- nil |
||||
copy(pending[i:], pending[i+1:]) |
||||
pending = pending[:len(pending)-1] |
||||
i-- |
||||
} |
||||
} |
||||
rearmTimeout() |
||||
|
||||
case now := <-timeout.C: |
||||
// notify and remove callbacks whose deadline is in the past.
|
||||
i := 0 |
||||
for ; i < len(pending) && now.After(pending[i].deadline); i++ { |
||||
pending[i].errc <- errTimeout |
||||
} |
||||
if i > 0 { |
||||
copy(pending, pending[i:]) |
||||
pending = pending[:len(pending)-i] |
||||
} |
||||
rearmTimeout() |
||||
} |
||||
} |
||||
} |
||||
|
||||
const (
	macSize  = 256 / 8           // size of the packet hash prefix
	sigSize  = 520 / 8           // size of the recoverable signature
	headSize = macSize + sigSize // space of packet frame data
)

// headSpace is a zeroed placeholder for the frame; hash and signature
// are filled in by send after the payload is encoded.
var headSpace = make([]byte, headSize)
|
||||
// send encodes req as an RLP packet of the given type, signs it,
// prefixes the hash, and writes it to the node's discovery endpoint.
// Packet layout: hash (macSize) | signature (sigSize) | type | RLP payload.
func (t *udp) send(to *Node, ptype byte, req interface{}) error {
	b := new(bytes.Buffer)
	b.Write(headSpace)
	b.WriteByte(ptype)
	if err := rlp.Encode(b, req); err != nil {
		log.Errorln("error encoding packet:", err)
		return err
	}

	packet := b.Bytes()
	// sign the type byte plus payload.
	sig, err := crypto.Sign(crypto.Sha3(packet[headSize:]), t.priv)
	if err != nil {
		log.Errorln("could not sign packet:", err)
		return err
	}
	copy(packet[macSize:], sig)
	// add the hash to the front. Note: this doesn't protect the
	// packet in any way. Our public key will be part of this hash in
	// the future.
	copy(packet, crypto.Sha3(packet[macSize:]))

	toaddr := &net.UDPAddr{IP: to.IP, Port: to.DiscPort}
	log.DebugDetailf(">>> %v %T %v\n", toaddr, req, req)
	if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil {
		log.DebugDetailln("UDP send failed:", err)
	}
	return err
}
||||
|
||||
// readLoop runs in its own goroutine. it handles incoming UDP packets.
// It exits (and closes the socket) when a read fails, which happens
// when close shuts the connection down.
func (t *udp) readLoop() {
	defer t.conn.Close()
	buf := make([]byte, 4096) // TODO: good buffer size
	for {
		nbytes, from, err := t.conn.ReadFromUDP(buf)
		if err != nil {
			return
		}
		if err := t.packetIn(from, buf[:nbytes]); err != nil {
			log.Debugf("Bad packet from %v: %v\n", from, err)
		}
	}
}
||||
|
||||
// packetIn verifies, decodes and dispatches a single incoming packet.
// It checks the hash prefix, recovers the sender's node ID from the
// signature, RLP-decodes the payload by type and invokes its handler.
func (t *udp) packetIn(from *net.UDPAddr, buf []byte) error {
	if len(buf) < headSize+1 {
		return errPacketTooSmall
	}
	hash, sig, sigdata := buf[:macSize], buf[macSize:headSize], buf[headSize:]
	shouldhash := crypto.Sha3(buf[macSize:])
	if !bytes.Equal(hash, shouldhash) {
		return errBadHash
	}
	// the sender's identity is recovered from the signature over the
	// type byte and payload.
	fromID, err := recoverNodeID(crypto.Sha3(buf[headSize:]), sig)
	if err != nil {
		return err
	}

	var req interface {
		handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error
	}
	switch ptype := sigdata[0]; ptype {
	case pingPacket:
		req = new(ping)
	case pongPacket:
		req = new(pong)
	case findnodePacket:
		req = new(findnode)
	case neighborsPacket:
		req = new(neighbors)
	default:
		return fmt.Errorf("unknown type: %d", ptype)
	}
	if err := rlp.Decode(bytes.NewReader(sigdata[1:]), req); err != nil {
		return err
	}
	log.DebugDetailf("<<< %v %T %v\n", from, req, req)
	return req.handle(t, from, fromID, hash)
}
||||
|
||||
// handle registers the sender in the table and answers with a pong
// carrying the ping packet's hash as reply token.
func (req *ping) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	t.mutex.Lock()
	// Note: we're ignoring the provided IP address right now
	n := t.bumpOrAdd(fromID, from)
	if req.Port != 0 {
		n.TCPPort = int(req.Port)
	}
	t.mutex.Unlock()

	t.send(n, pongPacket, pong{
		ReplyTok:   mac,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	})
	return nil
}
||||
|
||||
// handle bumps the sender's activity and forwards the pong to loop
// so the matching pending ping completes.
func (req *pong) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	t.mutex.Lock()
	t.bump(fromID)
	t.mutex.Unlock()

	t.replies <- reply{fromID, pongPacket, req}
	return nil
}
||||
|
||||
// handle registers the sender and answers with the closest known
// nodes to the requested target.
func (req *findnode) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	t.mutex.Lock()
	e := t.bumpOrAdd(fromID, from)
	closest := t.closest(req.Target, bucketSize).entries
	t.mutex.Unlock()

	t.send(e, neighborsPacket, neighbors{
		Nodes:      closest,
		Expiration: uint64(time.Now().Add(expiration).Unix()),
	})
	return nil
}
||||
|
||||
// handle absorbs the received nodes into the table and forwards the
// packet to loop so pending findnode callbacks can consume it.
func (req *neighbors) handle(t *udp, from *net.UDPAddr, fromID NodeID, mac []byte) error {
	if expired(req.Expiration) {
		return errExpired
	}
	t.mutex.Lock()
	t.bump(fromID)
	t.add(req.Nodes)
	t.mutex.Unlock()

	t.replies <- reply{fromID, neighborsPacket, req}
	return nil
}
||||
|
||||
// expired reports whether the given unix timestamp lies in the past.
func expired(ts uint64) bool {
	return time.Now().After(time.Unix(int64(ts), 0))
}
@ -0,0 +1,211 @@ |
||||
package discover |
||||
|
||||
import ( |
||||
"fmt" |
||||
logpkg "log" |
||||
"net" |
||||
"os" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/logger" |
||||
) |
||||
|
||||
// init routes discovery log output to stdout so failing tests come with
// protocol-level context; only errors are shown to keep output readable.
func init() {
	logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, logpkg.LstdFlags, logger.ErrorLevel))
}
||||
|
||||
func TestUDP_ping(t *testing.T) { |
||||
t.Parallel() |
||||
|
||||
n1, _ := ListenUDP(newkey(), "127.0.0.1:0", nil) |
||||
n2, _ := ListenUDP(newkey(), "127.0.0.1:0", nil) |
||||
defer n1.Close() |
||||
defer n2.Close() |
||||
|
||||
if err := n1.net.ping(n2.self); err != nil { |
||||
t.Fatalf("ping error: %v", err) |
||||
} |
||||
if find(n2, n1.self.ID) == nil { |
||||
t.Errorf("node 2 does not contain id of node 1") |
||||
} |
||||
if e := find(n1, n2.self.ID); e != nil { |
||||
t.Errorf("node 1 does contains id of node 2: %v", e) |
||||
} |
||||
} |
||||
|
||||
func find(tab *Table, id NodeID) *Node { |
||||
for _, b := range tab.buckets { |
||||
for _, e := range b.entries { |
||||
if e.ID == id { |
||||
return e |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// TestUDP_findnode checks that a findnode request returns the bucketSize
// entries of n2's table closest to the target, in distance order.
func TestUDP_findnode(t *testing.T) {
	t.Parallel()

	n1, _ := ListenUDP(newkey(), "127.0.0.1:0", nil)
	n2, _ := ListenUDP(newkey(), "127.0.0.1:0", nil)
	defer n1.Close()
	defer n2.Close()

	// put a few nodes into n2. the exact distribution shouldn't
	// matter much, although we need to take care not to overflow
	// any bucket.
	target := randomID(n1.self.ID, 100)
	nodes := &nodesByDistance{target: target}
	for i := 0; i < bucketSize; i++ {
		n2.add([]*Node{&Node{
			IP:       net.IP{1, 2, 3, byte(i)},
			DiscPort: i + 2,
			TCPPort:  i + 2,
			ID:       randomID(n2.self.ID, i+2),
		}})
	}
	// NOTE(review): nodes.entries is never filled above, so this add
	// looks like a no-op — confirm whether the loop was meant to push
	// onto nodes instead of adding each entry directly.
	n2.add(nodes.entries)
	// Make n1 known to n2 so the request is answered.
	n2.bumpOrAdd(n1.self.ID, &net.UDPAddr{IP: n1.self.IP, Port: n1.self.DiscPort})
	expected := n2.closest(target, bucketSize)

	// UDP is lossy even on localhost; retry the whole exchange.
	err := runUDP(10, func() error {
		result, _ := n1.net.findnode(n2.self, target)
		if len(result) != bucketSize {
			return fmt.Errorf("wrong number of results: got %d, want %d", len(result), bucketSize)
		}
		for i := range result {
			if result[i].ID != expected.entries[i].ID {
				return fmt.Errorf("result mismatch at %d:\n got: %v\n want: %v", i, result[i], expected.entries[i])
			}
		}
		return nil
	})
	if err != nil {
		t.Error(err)
	}
}
||||
|
||||
// TestUDP_replytimeout checks that ping and findnode requests to an
// unresponsive endpoint fail with errTimeout.
func TestUDP_replytimeout(t *testing.T) {
	t.Parallel()

	// reserve a port so we don't talk to an existing service by accident
	addr, _ := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	fd, err := net.ListenUDP("udp", addr)
	if err != nil {
		t.Fatal(err)
	}
	defer fd.Close()

	n1, _ := ListenUDP(newkey(), "127.0.0.1:0", nil)
	defer n1.Close()
	// n2 points at the reserved port, which never answers.
	n2 := n1.bumpOrAdd(randomID(n1.self.ID, 10), fd.LocalAddr().(*net.UDPAddr))

	if err := n1.net.ping(n2); err != errTimeout {
		t.Error("expected timeout error, got", err)
	}

	if result, err := n1.net.findnode(n2, n1.self.ID); err != errTimeout {
		t.Error("expected timeout error, got", err)
	} else if len(result) > 0 {
		t.Error("expected empty result, got", result)
	}
}
||||
|
||||
// TestUDP_findnodeMultiReply checks that a pending findnode call keeps
// collecting nodes from multiple neighbors packets until bucketSize
// entries have been gathered.
func TestUDP_findnodeMultiReply(t *testing.T) {
	t.Parallel()

	n1, _ := ListenUDP(newkey(), "127.0.0.1:0", nil)
	n2, _ := ListenUDP(newkey(), "127.0.0.1:0", nil)
	udp2 := n2.net.(*udp)
	defer n1.Close()
	defer n2.Close()

	// UDP is lossy even on localhost; retry the whole exchange.
	err := runUDP(10, func() error {
		nodes := make([]*Node, bucketSize)
		for i := range nodes {
			nodes[i] = &Node{
				IP:       net.IP{1, 2, 3, 4},
				DiscPort: i + 1,
				TCPPort:  i + 1,
				ID:       randomID(n2.self.ID, i+1),
			}
		}

		// ask N2 for neighbors. it will send an empty reply back.
		// the request will wait for up to bucketSize replies.
		resultc := make(chan []*Node)
		errc := make(chan error)
		go func() {
			ns, err := n1.net.findnode(n2.self, n1.self.ID)
			if err != nil {
				errc <- err
			} else {
				resultc <- ns
			}
		}()

		// send a few more neighbors packets to N1 in chunks of 5.
		// it should collect those.
		for end := 0; end < len(nodes); {
			off := end
			if end = end + 5; end > len(nodes) {
				end = len(nodes)
			}
			udp2.send(n1.self, neighborsPacket, neighbors{
				Nodes:      nodes[off:end],
				Expiration: uint64(time.Now().Add(10 * time.Second).Unix()),
			})
		}

		// check that they are all returned. we cannot just check for
		// equality because they might not be returned in the order they
		// were sent.
		var result []*Node
		select {
		case result = <-resultc:
		case err := <-errc:
			return err
		}
		if hasDuplicates(result) {
			return fmt.Errorf("result slice contains duplicates")
		}
		if len(result) != len(nodes) {
			return fmt.Errorf("wrong number of nodes returned: got %d, want %d", len(result), len(nodes))
		}
		matched := make(map[NodeID]bool)
		for _, n := range result {
			for _, expn := range nodes {
				if n.ID == expn.ID { // && bytes.Equal(n.Addr.IP, expn.Addr.IP) && n.Addr.Port == expn.Addr.Port {
					matched[n.ID] = true
				}
			}
		}
		if len(matched) != len(nodes) {
			return fmt.Errorf("wrong number of matching nodes: got %d, want %d", len(matched), len(nodes))
		}
		return nil
	})
	if err != nil {
		t.Error(err)
	}
}
||||
|
||||
// runUDP runs a test n times and returns an error if the test failed
|
||||
// in all n runs. This is necessary because UDP is unreliable even for
|
||||
// connections on the local machine, causing test failures.
|
||||
func runUDP(n int, test func() error) error { |
||||
errcount := 0 |
||||
errors := "" |
||||
for i := 0; i < n; i++ { |
||||
if err := test(); err != nil { |
||||
errors += fmt.Sprintf("\n#%d: %v", i, err) |
||||
errcount++ |
||||
} |
||||
} |
||||
if errcount == n { |
||||
return fmt.Errorf("failed on all %d iterations:%s", n, errors) |
||||
} |
||||
return nil |
||||
} |
@ -1,23 +0,0 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
) |
||||
|
||||
func ParseNAT(natType string, gateway string) (nat NAT, err error) { |
||||
switch natType { |
||||
case "UPNP": |
||||
nat = UPNP() |
||||
case "PMP": |
||||
ip := net.ParseIP(gateway) |
||||
if ip == nil { |
||||
return nil, fmt.Errorf("cannot resolve PMP gateway IP %s", gateway) |
||||
} |
||||
nat = PMP(ip) |
||||
case "": |
||||
default: |
||||
return nil, fmt.Errorf("unrecognised NAT type '%s'", natType) |
||||
} |
||||
return |
||||
} |
@ -0,0 +1,235 @@ |
||||
// Package nat provides access to common port mapping protocols.
|
||||
package nat |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/logger" |
||||
"github.com/jackpal/go-nat-pmp" |
||||
) |
||||
|
||||
var log = logger.NewLogger("P2P NAT") |
||||
|
||||
// An implementation of nat.Interface can map local ports to ports
|
||||
// accessible from the Internet.
|
||||
type Interface interface {
	// AddMapping and DeleteMapping manage a mapping between a port on
	// the local machine and a port that can be connected to from the
	// internet.
	//
	// protocol is "UDP" or "TCP". Some implementations allow setting
	// a display name for the mapping. The mapping may be removed by
	// the gateway when its lifetime ends.
	AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error
	DeleteMapping(protocol string, extport, intport int) error

	// ExternalIP should return the external (Internet-facing)
	// address of the gateway device.
	ExternalIP() (net.IP, error)

	// String should return the name of the method; it is used for
	// logging.
	String() string
}
||||
|
||||
// Parse parses a NAT interface description.
|
||||
// The following formats are currently accepted.
|
||||
// Note that mechanism names are not case-sensitive.
|
||||
//
|
||||
// "" or "none" return nil
|
||||
// "extip:77.12.33.4" will assume the local machine is reachable on the given IP
|
||||
// "any" uses the first auto-detected mechanism
|
||||
// "upnp" uses the Universal Plug and Play protocol
|
||||
// "pmp" uses NAT-PMP with an auto-detected gateway address
|
||||
// "pmp:192.168.0.1" uses NAT-PMP with the given gateway address
|
||||
func Parse(spec string) (Interface, error) { |
||||
var ( |
||||
parts = strings.SplitN(spec, ":", 2) |
||||
mech = strings.ToLower(parts[0]) |
||||
ip net.IP |
||||
) |
||||
if len(parts) > 1 { |
||||
ip = net.ParseIP(parts[1]) |
||||
if ip == nil { |
||||
return nil, errors.New("invalid IP address") |
||||
} |
||||
} |
||||
switch mech { |
||||
case "", "none", "off": |
||||
return nil, nil |
||||
case "any", "auto", "on": |
||||
return Any(), nil |
||||
case "extip", "ip": |
||||
if ip == nil { |
||||
return nil, errors.New("missing IP address") |
||||
} |
||||
return ExtIP(ip), nil |
||||
case "upnp": |
||||
return UPnP(), nil |
||||
case "pmp", "natpmp", "nat-pmp": |
||||
return PMP(ip), nil |
||||
default: |
||||
return nil, fmt.Errorf("unknown mechanism %q", parts[0]) |
||||
} |
||||
} |
||||
|
||||
const (
	// mapTimeout is the lifetime requested for each port mapping.
	mapTimeout = 20 * time.Minute
	// mapUpdateInterval controls how often Map re-adds the mapping;
	// it is shorter than mapTimeout so mappings never lapse.
	mapUpdateInterval = 15 * time.Minute
)
||||
|
||||
// Map adds a port mapping on m and keeps it alive until c is closed.
|
||||
// This function is typically invoked in its own goroutine.
|
||||
func Map(m Interface, c chan struct{}, protocol string, extport, intport int, name string) { |
||||
refresh := time.NewTimer(mapUpdateInterval) |
||||
defer func() { |
||||
refresh.Stop() |
||||
log.Debugf("Deleting port mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m) |
||||
m.DeleteMapping(protocol, extport, intport) |
||||
}() |
||||
log.Debugf("add mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m) |
||||
if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil { |
||||
log.Errorf("mapping error: %v\n", err) |
||||
} |
||||
for { |
||||
select { |
||||
case _, ok := <-c: |
||||
if !ok { |
||||
return |
||||
} |
||||
case <-refresh.C: |
||||
log.DebugDetailf("refresh mapping: %s %d -> %d (%s) using %s\n", protocol, extport, intport, name, m) |
||||
if err := m.AddMapping(protocol, intport, extport, name, mapTimeout); err != nil { |
||||
log.Errorf("mapping error: %v\n", err) |
||||
} |
||||
refresh.Reset(mapUpdateInterval) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// ExtIP assumes that the local machine is reachable on the given
|
||||
// external IP address, and that any required ports were mapped manually.
|
||||
// Mapping operations will not return an error but won't actually do anything.
|
||||
func ExtIP(ip net.IP) Interface { |
||||
if ip == nil { |
||||
panic("IP must not be nil") |
||||
} |
||||
return extIP(ip) |
||||
} |
||||
|
||||
// extIP is the Interface implementation behind ExtIP: it reports the
// fixed address and treats mapping calls as no-ops.
type extIP net.IP

func (n extIP) ExternalIP() (net.IP, error) { return net.IP(n), nil }
func (n extIP) String() string              { return fmt.Sprintf("ExtIP(%v)", net.IP(n)) }

// These do nothing; ports are assumed to be mapped manually already.
func (extIP) AddMapping(string, int, int, string, time.Duration) error { return nil }
func (extIP) DeleteMapping(string, int, int) error                     { return nil }
||||
|
||||
// Any returns a port mapper that tries to discover any supported
|
||||
// mechanism on the local network.
|
||||
func Any() Interface {
	// TODO: attempt to discover whether the local machine has an
	// Internet-class address. Return ExtIP in this case.
	return startautodisc("UPnP or NAT-PMP", func() Interface {
		// Probe both mechanisms concurrently; the channel is buffered
		// so the losing goroutine does not leak.
		found := make(chan Interface, 2)
		go func() { found <- discoverUPnP() }()
		go func() { found <- discoverPMP() }()
		// Take the first non-nil result; both probes send exactly once.
		for i := 0; i < cap(found); i++ {
			if c := <-found; c != nil {
				return c
			}
		}
		return nil
	})
}
||||
|
||||
// UPnP returns a port mapper that uses UPnP. It will attempt to
|
||||
// discover the address of your router using UDP broadcasts.
|
||||
func UPnP() Interface {
	// Discovery runs in the background; the returned value blocks
	// callers until it completes (see autodisc).
	return startautodisc("UPnP", discoverUPnP)
}
||||
|
||||
// PMP returns a port mapper that uses NAT-PMP. The provided gateway
|
||||
// address should be the IP of your router. If the given gateway
|
||||
// address is nil, PMP will attempt to auto-discover the router.
|
||||
func PMP(gateway net.IP) Interface { |
||||
if gateway != nil { |
||||
return &pmp{gw: gateway, c: natpmp.NewClient(gateway)} |
||||
} |
||||
return startautodisc("NAT-PMP", discoverPMP) |
||||
} |
||||
|
||||
// autodisc represents a port mapping mechanism that is still being
|
||||
// auto-discovered. Calls to the Interface methods on this type will
|
||||
// wait until the discovery is done and then call the method on the
|
||||
// discovered mechanism.
|
||||
//
|
||||
// This type is useful because discovery can take a while but we
|
||||
// want return an Interface value from UPnP, PMP and Auto immediately.
|
||||
type autodisc struct {
	what string           // human-readable mechanism name, shown until discovery finishes
	done <-chan Interface // receives the discovery result once, then is closed

	mu    sync.Mutex
	found Interface // cached result; nil until discovery succeeded
}

// startautodisc runs doit in the background and returns an autodisc
// whose method calls block until the result is available.
func startautodisc(what string, doit func() Interface) Interface {
	// TODO: monitor network configuration and rerun doit when it changes.
	done := make(chan Interface)
	ad := &autodisc{what: what, done: done}
	go func() { done <- doit(); close(done) }()
	return ad
}
||||
|
||||
func (n *autodisc) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error { |
||||
if err := n.wait(); err != nil { |
||||
return err |
||||
} |
||||
return n.found.AddMapping(protocol, extport, intport, name, lifetime) |
||||
} |
||||
|
||||
func (n *autodisc) DeleteMapping(protocol string, extport, intport int) error { |
||||
if err := n.wait(); err != nil { |
||||
return err |
||||
} |
||||
return n.found.DeleteMapping(protocol, extport, intport) |
||||
} |
||||
|
||||
func (n *autodisc) ExternalIP() (net.IP, error) { |
||||
if err := n.wait(); err != nil { |
||||
return nil, err |
||||
} |
||||
return n.found.ExternalIP() |
||||
} |
||||
|
||||
func (n *autodisc) String() string { |
||||
n.mu.Lock() |
||||
defer n.mu.Unlock() |
||||
if n.found == nil { |
||||
return n.what |
||||
} else { |
||||
return n.found.String() |
||||
} |
||||
} |
||||
|
||||
func (n *autodisc) wait() error { |
||||
n.mu.Lock() |
||||
found := n.found |
||||
n.mu.Unlock() |
||||
if found != nil { |
||||
// already discovered
|
||||
return nil |
||||
} |
||||
if found = <-n.done; found == nil { |
||||
return errors.New("no devices discovered") |
||||
} |
||||
n.mu.Lock() |
||||
n.found = found |
||||
n.mu.Unlock() |
||||
return nil |
||||
} |
@ -0,0 +1,115 @@ |
||||
package nat |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/jackpal/go-nat-pmp" |
||||
) |
||||
|
||||
// natPMPClient adapts the NAT-PMP protocol implementation so it conforms to
|
||||
// the common interface.
|
||||
type pmp struct {
	gw net.IP         // gateway (router) address
	c  *natpmp.Client // NAT-PMP client bound to gw
}

// String identifies the mechanism and its gateway in log output.
func (n *pmp) String() string {
	return fmt.Sprintf("NAT-PMP(%v)", n.gw)
}
||||
|
||||
func (n *pmp) ExternalIP() (net.IP, error) { |
||||
response, err := n.c.GetExternalAddress() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return response.ExternalIPAddress[:], nil |
||||
} |
||||
|
||||
// AddMapping maps extport on the gateway to intport on this machine.
// NAT-PMP requires a finite lifetime, so zero or negative lifetimes
// are rejected.
func (n *pmp) AddMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error {
	if lifetime <= 0 {
		return fmt.Errorf("lifetime must not be <= 0")
	}
	// Note order of port arguments is switched between our
	// AddMapping and the client's AddPortMapping.
	_, err := n.c.AddPortMapping(strings.ToLower(protocol), intport, extport, int(lifetime/time.Second))
	return err
}
||||
|
||||
func (n *pmp) DeleteMapping(protocol string, extport, intport int) (err error) { |
||||
// To destroy a mapping, send an add-port with an internalPort of
|
||||
// the internal port to destroy, an external port of zero and a
|
||||
// time of zero.
|
||||
_, err = n.c.AddPortMapping(strings.ToLower(protocol), intport, 0, 0) |
||||
return err |
||||
} |
||||
|
||||
func discoverPMP() Interface { |
||||
// run external address lookups on all potential gateways
|
||||
gws := potentialGateways() |
||||
found := make(chan *pmp, len(gws)) |
||||
for i := range gws { |
||||
gw := gws[i] |
||||
go func() { |
||||
c := natpmp.NewClient(gw) |
||||
if _, err := c.GetExternalAddress(); err != nil { |
||||
found <- nil |
||||
} else { |
||||
found <- &pmp{gw, c} |
||||
} |
||||
}() |
||||
} |
||||
// return the one that responds first.
|
||||
// discovery needs to be quick, so we stop caring about
|
||||
// any responses after a very short timeout.
|
||||
timeout := time.NewTimer(1 * time.Second) |
||||
defer timeout.Stop() |
||||
for _ = range gws { |
||||
select { |
||||
case c := <-found: |
||||
if c != nil { |
||||
return c |
||||
} |
||||
case <-timeout.C: |
||||
return nil |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
var (
	// LAN IP ranges (RFC 1918) used to recognize private addresses when
	// guessing gateway locations. Parse errors are impossible for these
	// literals, so they are ignored.
	_, lan10, _  = net.ParseCIDR("10.0.0.0/8")
	_, lan176, _ = net.ParseCIDR("172.16.0.0/12")
	_, lan192, _ = net.ParseCIDR("192.168.0.0/16")
)
||||
|
||||
// TODO: improve this. We currently assume that (on most networks)
|
||||
// the router is X.X.X.1 in a local LAN range.
|
||||
func potentialGateways() (gws []net.IP) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil
	}
	for _, iface := range ifaces {
		ifaddrs, err := iface.Addrs()
		if err != nil {
			// Keep whatever was collected from earlier interfaces.
			return gws
		}
		for _, addr := range ifaddrs {
			switch x := addr.(type) {
			case *net.IPNet:
				if lan10.Contains(x.IP) || lan176.Contains(x.IP) || lan192.Contains(x.IP) {
					// Guess the gateway as the .1 address of the subnet.
					ip := x.IP.Mask(x.Mask).To4()
					if ip != nil {
						ip[3] = ip[3] | 0x01
						gws = append(gws, ip)
					}
				}
			}
		}
	}
	return gws
}
@ -0,0 +1,149 @@ |
||||
package nat |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/fjl/goupnp" |
||||
"github.com/fjl/goupnp/dcps/internetgateway1" |
||||
"github.com/fjl/goupnp/dcps/internetgateway2" |
||||
) |
||||
|
||||
// upnp adapts a goupnp IGD service client to the common Interface.
type upnp struct {
	dev     *goupnp.RootDevice // gateway device description
	service string             // IGD service level, e.g. "IGDv1-IP1"; used in String
	client  upnpClient
}

// upnpClient is the subset of the generated goupnp service clients used
// by this package; all IGDv1/IGDv2 WAN*Connection clients satisfy it.
type upnpClient interface {
	GetExternalIPAddress() (string, error)
	AddPortMapping(string, uint16, string, uint16, string, bool, string, uint32) error
	DeletePortMapping(string, uint16, string) error
	GetNATRSIPStatus() (sip bool, nat bool, err error)
}
||||
|
||||
func (n *upnp) ExternalIP() (addr net.IP, err error) { |
||||
ipString, err := n.client.GetExternalIPAddress() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
ip := net.ParseIP(ipString) |
||||
if ip == nil { |
||||
return nil, errors.New("bad IP in response") |
||||
} |
||||
return ip, nil |
||||
} |
||||
|
||||
func (n *upnp) AddMapping(protocol string, extport, intport int, desc string, lifetime time.Duration) error { |
||||
ip, err := n.internalAddress() |
||||
if err != nil { |
||||
return nil |
||||
} |
||||
protocol = strings.ToUpper(protocol) |
||||
lifetimeS := uint32(lifetime / time.Second) |
||||
return n.client.AddPortMapping("", uint16(extport), protocol, uint16(intport), ip.String(), true, desc, lifetimeS) |
||||
} |
||||
|
||||
// internalAddress returns our IP on the interface that shares a subnet
// with the gateway device, i.e. the address the gateway can reach when
// forwarding mapped ports.
func (n *upnp) internalAddress() (net.IP, error) {
	devaddr, err := net.ResolveUDPAddr("udp4", n.dev.URLBase.Host)
	if err != nil {
		return nil, err
	}
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			switch x := addr.(type) {
			case *net.IPNet:
				// The gateway's address falling inside our subnet
				// identifies the right interface.
				if x.Contains(devaddr.IP) {
					return x.IP, nil
				}
			}
		}
	}
	return nil, fmt.Errorf("could not find local address in same net as %v", devaddr)
}
||||
|
||||
// DeleteMapping removes a mapping previously created with AddMapping.
// UPnP identifies the mapping by external port and protocol only, so
// intport is ignored here.
func (n *upnp) DeleteMapping(protocol string, extport, intport int) error {
	return n.client.DeletePortMapping("", uint16(extport), strings.ToUpper(protocol))
}
||||
|
||||
// String identifies the mechanism and IGD service level in log output.
func (n *upnp) String() string {
	return "UPNP " + n.service
}
||||
|
||||
// discoverUPnP searches for Internet Gateway Devices
|
||||
// and returns the first one it can find on the local network.
|
||||
func discoverUPnP() Interface {
	// Each discover goroutine sends exactly one value on found.
	found := make(chan *upnp, 2)
	// IGDv1
	go discover(found, internetgateway1.URN_WANConnectionDevice_1, func(dev *goupnp.RootDevice, sc goupnp.ServiceClient) *upnp {
		switch sc.Service.ServiceType {
		case internetgateway1.URN_WANIPConnection_1:
			return &upnp{dev, "IGDv1-IP1", &internetgateway1.WANIPConnection1{sc}}
		case internetgateway1.URN_WANPPPConnection_1:
			return &upnp{dev, "IGDv1-PPP1", &internetgateway1.WANPPPConnection1{sc}}
		}
		return nil
	})
	// IGDv2
	go discover(found, internetgateway2.URN_WANConnectionDevice_2, func(dev *goupnp.RootDevice, sc goupnp.ServiceClient) *upnp {
		switch sc.Service.ServiceType {
		case internetgateway2.URN_WANIPConnection_1:
			return &upnp{dev, "IGDv2-IP1", &internetgateway2.WANIPConnection1{sc}}
		case internetgateway2.URN_WANIPConnection_2:
			return &upnp{dev, "IGDv2-IP2", &internetgateway2.WANIPConnection2{sc}}
		case internetgateway2.URN_WANPPPConnection_1:
			return &upnp{dev, "IGDv2-PPP1", &internetgateway2.WANPPPConnection1{sc}}
		}
		return nil
	})
	// Return the first non-nil result; drain the rest implicitly via
	// the buffered channel.
	for i := 0; i < cap(found); i++ {
		if c := <-found; c != nil {
			return c
		}
	}
	return nil
}
||||
|
||||
func discover(out chan<- *upnp, target string, matcher func(*goupnp.RootDevice, goupnp.ServiceClient) *upnp) { |
||||
devs, err := goupnp.DiscoverDevices(target) |
||||
if err != nil { |
||||
return |
||||
} |
||||
found := false |
||||
for i := 0; i < len(devs) && !found; i++ { |
||||
if devs[i].Root == nil { |
||||
continue |
||||
} |
||||
devs[i].Root.Device.VisitServices(func(service *goupnp.Service) { |
||||
if found { |
||||
return |
||||
} |
||||
// check for a matching IGD service
|
||||
sc := goupnp.ServiceClient{service.NewSOAPClient(), devs[i].Root, service} |
||||
upnp := matcher(devs[i].Root, sc) |
||||
if upnp == nil { |
||||
return |
||||
} |
||||
// check whether port mapping is enabled
|
||||
if _, nat, err := upnp.client.GetNATRSIPStatus(); err != nil || !nat { |
||||
return |
||||
} |
||||
out <- upnp |
||||
found = true |
||||
}) |
||||
} |
||||
if !found { |
||||
out <- nil |
||||
} |
||||
} |
@ -1,55 +0,0 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"time" |
||||
|
||||
natpmp "github.com/jackpal/go-nat-pmp" |
||||
) |
||||
|
||||
// Adapt the NAT-PMP protocol to the NAT interface
|
||||
|
||||
// TODO:
|
||||
// + Register for changes to the external address.
|
||||
// + Re-register port mapping when router reboots.
|
||||
// + A mechanism for keeping a port mapping registered.
|
||||
// + Discover gateway address automatically.
|
||||
|
||||
// natPMPClient adapts the NAT-PMP protocol client to the NAT interface.
type natPMPClient struct {
	client *natpmp.Client
}

// PMP returns a NAT traverser that uses NAT-PMP. The provided gateway
// address should be the IP of your router.
func PMP(gateway net.IP) (nat NAT) {
	return &natPMPClient{natpmp.NewClient(gateway)}
}

// String identifies the mechanism in log output.
func (*natPMPClient) String() string {
	return "NAT-PMP"
}
||||
|
||||
func (n *natPMPClient) GetExternalAddress() (net.IP, error) { |
||||
response, err := n.client.GetExternalAddress() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return response.ExternalIPAddress[:], nil |
||||
} |
||||
|
||||
// AddPortMapping maps extport on the gateway to intport on this machine.
// NAT-PMP requires a finite lifetime, so non-positive values are
// rejected. name is unused by the protocol.
func (n *natPMPClient) AddPortMapping(protocol string, extport, intport int, name string, lifetime time.Duration) error {
	if lifetime <= 0 {
		return fmt.Errorf("lifetime must not be <= 0")
	}
	// Note order of port arguments is switched between our AddPortMapping and the client's AddPortMapping.
	_, err := n.client.AddPortMapping(protocol, intport, extport, int(lifetime/time.Second))
	return err
}
||||
|
||||
func (n *natPMPClient) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { |
||||
// To destroy a mapping, send an add-port with
|
||||
// an internalPort of the internal port to destroy, an external port of zero and a time of zero.
|
||||
_, err = n.client.AddPortMapping(protocol, internalPort, 0, 0) |
||||
return |
||||
} |
@ -1,341 +0,0 @@ |
||||
package p2p |
||||
|
||||
// Just enough UPnP to be able to forward ports
|
||||
//
|
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/xml" |
||||
"errors" |
||||
"fmt" |
||||
"net" |
||||
"net/http" |
||||
"os" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
const (
	// upnpDiscoverAttempts is how many SSDP search requests are sent
	// before discovery is considered failed.
	upnpDiscoverAttempts = 3
	upnpDiscoverTimeout  = 5 * time.Second
)
||||
|
||||
// UPNP returns a NAT port mapper that uses UPnP. It will attempt to
|
||||
// discover the address of your router using UDP broadcasts.
|
||||
func UPNP() NAT {
	// Gateway discovery is deferred until the first method call
	// (see discover).
	return &upnpNAT{}
}

// upnpNAT implements NAT using "just enough UPnP" to forward ports.
type upnpNAT struct {
	serviceURL string // control URL of the WANIPConnection service; empty until discovered
	ourIP      string // local address used as the mapping target
}
||||
|
||||
// String identifies the mechanism in log output.
func (n *upnpNAT) String() string {
	return "UPNP"
}
||||
|
||||
// discover locates an Internet Gateway Device via SSDP multicast and
// records the device's service URL plus our own IP for later SOAP
// requests. It is a no-op once a service URL has been found.
func (n *upnpNAT) discover() error {
	if n.serviceURL != "" {
		// already discovered
		return nil
	}

	// SSDP well-known multicast address.
	ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900")
	if err != nil {
		return err
	}
	// TODO: try on all network interfaces simultaneously.
	// Broadcasting on 0.0.0.0 could select a random interface
	// to send on (platform specific).
	conn, err := net.ListenPacket("udp4", ":0")
	if err != nil {
		return err
	}
	defer conn.Close()

	conn.SetDeadline(time.Now().Add(10 * time.Second))
	st := "ST: urn:schemas-upnp-org:device:InternetGatewayDevice:1\r\n"
	buf := bytes.NewBufferString(
		"M-SEARCH * HTTP/1.1\r\n" +
			"HOST: 239.255.255.250:1900\r\n" +
			st +
			"MAN: \"ssdp:discover\"\r\n" +
			"MX: 2\r\n\r\n")
	message := buf.Bytes()
	answerBytes := make([]byte, 1024)
	for i := 0; i < upnpDiscoverAttempts; i++ {
		_, err = conn.WriteTo(message, ssdp)
		if err != nil {
			return err
		}
		nn, _, err := conn.ReadFrom(answerBytes)
		if err != nil {
			// No answer within the deadline; try another attempt.
			continue
		}
		answer := string(answerBytes[0:nn])
		// The reply must echo our search target.
		if strings.Index(answer, "\r\n"+st) < 0 {
			continue
		}
		// HTTP header field names are case-insensitive.
		// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
		locString := "\r\nlocation: "
		answer = strings.ToLower(answer)
		locIndex := strings.Index(answer, locString)
		if locIndex < 0 {
			continue
		}
		// Extract the device description URL from the Location header.
		loc := answer[locIndex+len(locString):]
		endIndex := strings.Index(loc, "\r\n")
		if endIndex < 0 {
			continue
		}
		locURL := loc[0:endIndex]
		var serviceURL string
		serviceURL, err = getServiceURL(locURL)
		if err != nil {
			return err
		}
		var ourIP string
		ourIP, err = getOurIP()
		if err != nil {
			return err
		}
		n.serviceURL = serviceURL
		n.ourIP = ourIP
		return nil
	}
	return errors.New("UPnP port discovery failed.")
}
||||
|
||||
// GetExternalAddress queries the gateway's status info for its external
// address.
func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
	if err := n.discover(); err != nil {
		return nil, err
	}
	info, err := n.getStatusInfo()
	// NOTE(review): getStatusInfo never fills externalIpAddress (the
	// SOAP reply is not parsed yet), so this parses an empty string and
	// returns nil — confirm before relying on the result.
	return net.ParseIP(info.externalIpAddress), err
}
||||
|
||||
func (n *upnpNAT) AddPortMapping(protocol string, extport, intport int, description string, lifetime time.Duration) error { |
||||
if err := n.discover(); err != nil { |
||||
return err |
||||
} |
||||
|
||||
// A single concatenation would break ARM compilation.
|
||||
message := "<u:AddPortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n" + |
||||
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(extport) |
||||
message += "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" |
||||
message += "<NewInternalPort>" + strconv.Itoa(extport) + "</NewInternalPort>" + |
||||
"<NewInternalClient>" + n.ourIP + "</NewInternalClient>" + |
||||
"<NewEnabled>1</NewEnabled><NewPortMappingDescription>" |
||||
message += description + |
||||
"</NewPortMappingDescription><NewLeaseDuration>" + fmt.Sprint(lifetime/time.Second) + |
||||
"</NewLeaseDuration></u:AddPortMapping>" |
||||
|
||||
// TODO: check response to see if the port was forwarded
|
||||
_, err := soapRequest(n.serviceURL, "AddPortMapping", message) |
||||
return err |
||||
} |
||||
|
||||
// DeletePortMapping removes a mapping previously created with
// AddPortMapping. The UPnP request identifies the mapping by external
// port and protocol only; internalPort is unused.
func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) error {
	if err := n.discover(); err != nil {
		return err
	}

	message := "<u:DeletePortMapping xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n" +
		"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
		"</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" +
		"</u:DeletePortMapping>"

	// TODO: check response to see if the port was deleted
	_, err := soapRequest(n.serviceURL, "DeletePortMapping", message)
	return err
}
||||
|
||||
// statusInfo holds fields from a GetStatusInfo SOAP reply.
type statusInfo struct {
	externalIpAddress string // never populated yet; see getStatusInfo
}

// getStatusInfo issues a GetStatusInfo SOAP request. The reply body is
// currently discarded unparsed, so the returned statusInfo is always
// zero-valued.
func (n *upnpNAT) getStatusInfo() (info statusInfo, err error) {
	message := "<u:GetStatusInfo xmlns:u=\"urn:schemas-upnp-org:service:WANIPConnection:1\">\r\n" +
		"</u:GetStatusInfo>"

	var response *http.Response
	response, err = soapRequest(n.serviceURL, "GetStatusInfo", message)
	if err != nil {
		return
	}

	// TODO: Write a soap reply parser. It has to eat the Body and envelope tags...

	response.Body.Close()
	return
}
||||
|
||||
// service represents the Service type in an UPnP xml description.
|
||||
// Only the parts we care about are present and thus the xml may have more
|
||||
// fields than present in the structure.
|
||||
type service struct { |
||||
ServiceType string `xml:"serviceType"` |
||||
ControlURL string `xml:"controlURL"` |
||||
} |
||||
|
||||
// deviceList represents the deviceList type in an UPnP xml description.
|
||||
// Only the parts we care about are present and thus the xml may have more
|
||||
// fields than present in the structure.
|
||||
type deviceList struct { |
||||
XMLName xml.Name `xml:"deviceList"` |
||||
Device []device `xml:"device"` |
||||
} |
||||
|
||||
// serviceList represents the serviceList type in an UPnP xml description.
|
||||
// Only the parts we care about are present and thus the xml may have more
|
||||
// fields than present in the structure.
|
||||
type serviceList struct { |
||||
XMLName xml.Name `xml:"serviceList"` |
||||
Service []service `xml:"service"` |
||||
} |
||||
|
||||
// device represents the device type in an UPnP xml description.
// Only the parts we care about are present and thus the xml may have more
// fields than present in the structure.
type device struct {
	XMLName     xml.Name    `xml:"device"`
	DeviceType  string      `xml:"deviceType"` // URN identifying the device class
	DeviceList  deviceList  `xml:"deviceList"`
	ServiceList serviceList `xml:"serviceList"`
}
||||
|
||||
// specVersion represents the specVersion in a UPnP xml description.
// Only the parts we care about are present and thus the xml may have more
// fields than present in the structure.
type specVersion struct {
	XMLName xml.Name `xml:"specVersion"`
	Major   int      `xml:"major"`
	Minor   int      `xml:"minor"`
}
||||
|
||||
// root represents the Root document for a UPnP xml description.
// Only the parts we care about are present and thus the xml may have more
// fields than present in the structure.
type root struct {
	XMLName xml.Name `xml:"root"`
	// SpecVersion and Device carry no explicit xml tags; the decoder
	// matches them by field name.
	SpecVersion specVersion
	Device      device
}
||||
|
||||
func getChildDevice(d *device, deviceType string) *device { |
||||
dl := d.DeviceList.Device |
||||
for i := 0; i < len(dl); i++ { |
||||
if dl[i].DeviceType == deviceType { |
||||
return &dl[i] |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func getChildService(d *device, serviceType string) *service { |
||||
sl := d.ServiceList.Service |
||||
for i := 0; i < len(sl); i++ { |
||||
if sl[i].ServiceType == serviceType { |
||||
return &sl[i] |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// getOurIP guesses the local machine's IP address by resolving its own
// hostname and returning the first address found.
//
// Bug fix: the original condition was `if err != nil && len(p) > 0`,
// which only bailed out when the lookup failed AND still returned
// addresses; a failed lookup with an empty result fell through and
// panicked on p[0]. We now return on any lookup error and additionally
// guard against an empty (but error-free) result set.
func getOurIP() (ip string, err error) {
	hostname, err := os.Hostname()
	if err != nil {
		return
	}
	p, err := net.LookupIP(hostname)
	if err != nil {
		return
	}
	if len(p) == 0 {
		err = errors.New("no IP addresses found for " + hostname)
		return
	}
	return p[0].String(), nil
}
||||
|
||||
func getServiceURL(rootURL string) (url string, err error) { |
||||
r, err := http.Get(rootURL) |
||||
if err != nil { |
||||
return |
||||
} |
||||
defer r.Body.Close() |
||||
if r.StatusCode >= 400 { |
||||
err = errors.New(string(r.StatusCode)) |
||||
return |
||||
} |
||||
var root root |
||||
err = xml.NewDecoder(r.Body).Decode(&root) |
||||
|
||||
if err != nil { |
||||
return |
||||
} |
||||
a := &root.Device |
||||
if a.DeviceType != "urn:schemas-upnp-org:device:InternetGatewayDevice:1" { |
||||
err = errors.New("No InternetGatewayDevice") |
||||
return |
||||
} |
||||
b := getChildDevice(a, "urn:schemas-upnp-org:device:WANDevice:1") |
||||
if b == nil { |
||||
err = errors.New("No WANDevice") |
||||
return |
||||
} |
||||
c := getChildDevice(b, "urn:schemas-upnp-org:device:WANConnectionDevice:1") |
||||
if c == nil { |
||||
err = errors.New("No WANConnectionDevice") |
||||
return |
||||
} |
||||
d := getChildService(c, "urn:schemas-upnp-org:service:WANIPConnection:1") |
||||
if d == nil { |
||||
err = errors.New("No WANIPConnection") |
||||
return |
||||
} |
||||
url = combineURL(rootURL, d.ControlURL) |
||||
return |
||||
} |
||||
|
||||
// combineURL joins an absolute subURL path onto the scheme+host portion
// of rootURL (everything up to, but excluding, the first "/" after the
// "://" separator).
//
// Bug fix: the original indexed unconditionally, so a rootURL without
// "://" panicked (strings.Index returns -1), and a rootURL whose
// host part contained no "/" silently chopped the final byte off the
// host. Both malformed cases now fall back to simple concatenation.
func combineURL(rootURL, subURL string) string {
	const protocolEnd = "://"
	protoEndIndex := strings.Index(rootURL, protocolEnd)
	if protoEndIndex < 0 {
		return rootURL + subURL
	}
	hostStart := protoEndIndex + len(protocolEnd)
	pathIndex := strings.Index(rootURL[hostStart:], "/")
	if pathIndex < 0 {
		return rootURL + subURL
	}
	return rootURL[:hostStart+pathIndex] + subURL
}
||||
|
||||
func soapRequest(url, function, message string) (r *http.Response, err error) { |
||||
fullMessage := "<?xml version=\"1.0\" ?>" + |
||||
"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" + |
||||
"<s:Body>" + message + "</s:Body></s:Envelope>" |
||||
|
||||
req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) |
||||
if err != nil { |
||||
return |
||||
} |
||||
req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") |
||||
req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") |
||||
//req.Header.Set("Transfer-Encoding", "chunked")
|
||||
req.Header.Set("SOAPAction", "\"urn:schemas-upnp-org:service:WANIPConnection:1#"+function+"\"") |
||||
req.Header.Set("Connection", "Close") |
||||
req.Header.Set("Cache-Control", "no-cache") |
||||
req.Header.Set("Pragma", "no-cache") |
||||
|
||||
r, err = http.DefaultClient.Do(req) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
if r.Body != nil { |
||||
defer r.Body.Close() |
||||
} |
||||
|
||||
if r.StatusCode >= 400 { |
||||
// log.Stderr(function, r.StatusCode)
|
||||
err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function) |
||||
r = nil |
||||
return |
||||
} |
||||
return |
||||
} |
@ -1,158 +0,0 @@ |
||||
package p2p |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"reflect" |
||||
"sync" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
) |
||||
|
||||
// peerId is a minimal test implementation of a client identity, backed
// by a lazily generated public key (see Pubkey).
type peerId struct {
	pubkey []byte // cached public key; filled on first Pubkey() call
}
||||
|
||||
// String labels the test peer with the first 4 bytes of its public key.
// Note: calling Pubkey() here may lazily generate the key as a side effect.
func (self *peerId) String() string {
	return fmt.Sprintf("test peer %x", self.Pubkey()[:4])
}
||||
|
||||
func (self *peerId) Pubkey() (pubkey []byte) { |
||||
pubkey = self.pubkey |
||||
if len(pubkey) == 0 { |
||||
pubkey = crypto.GenerateNewKeyPair().PublicKey |
||||
self.pubkey = pubkey |
||||
} |
||||
return |
||||
} |
||||
|
||||
// newTestPeer builds a Peer with all protocol hooks stubbed out so that
// runBaseProtocol can be driven over a MsgPipe without a real network:
// the pubkey check always succeeds and the peer knows no other peers.
func newTestPeer() (peer *Peer) {
	peer = NewPeer(&peerId{}, []Cap{})
	peer.pubkeyHook = func(*peerAddr) error { return nil }
	peer.ourID = &peerId{}
	peer.listenAddr = &peerAddr{}
	peer.otherPeers = func() []*Peer { return nil }
	return
}
||||
|
||||
// TestBaseProtocolPeers wires two peers together over an in-memory
// MsgPipe and checks that peer1's known peer addresses (plus its own
// listen address, last) arrive at peer2 in order. A matcher goroutine
// consumes the addresses and closes the pipe once all are seen, which
// terminates peer2's protocol loop with ErrPipeClosed.
func TestBaseProtocolPeers(t *testing.T) {
	peerList := []*peerAddr{
		{IP: net.ParseIP("1.2.3.4"), Port: 2222, Pubkey: []byte{}},
		{IP: net.ParseIP("5.6.7.8"), Port: 3333, Pubkey: []byte{}},
	}
	listenAddr := &peerAddr{IP: net.ParseIP("1.3.5.7"), Port: 1111, Pubkey: []byte{}}
	rw1, rw2 := MsgPipe()
	defer rw1.Close()
	wg := new(sync.WaitGroup)

	// run matcher, close pipe when addresses have arrived
	numPeers := len(peerList) + 1
	addrChan := make(chan *peerAddr)
	wg.Add(1)
	go func() {
		i := 0
		for got := range addrChan {
			var want *peerAddr
			switch {
			case i < len(peerList):
				want = peerList[i]
			case i == len(peerList):
				want = listenAddr // listenAddr should be the last thing sent
			}
			t.Logf("got peer %d/%d: %v", i+1, numPeers, got)
			if !reflect.DeepEqual(want, got) {
				t.Errorf("mismatch: got %+v, want %+v", got, want)
			}
			i++
			if i == numPeers {
				break
			}
		}
		if i != numPeers {
			t.Errorf("wrong number of peers received: got %d, want %d", i, numPeers)
		}
		// Closing rw1 unblocks peer2's runBaseProtocol below.
		rw1.Close()
		wg.Done()
	}()

	// run first peer (in background)
	peer1 := newTestPeer()
	peer1.ourListenAddr = listenAddr
	peer1.otherPeers = func() []*Peer {
		pl := make([]*Peer, len(peerList))
		for i, addr := range peerList {
			pl[i] = &Peer{listenAddr: addr}
		}
		return pl
	}
	wg.Add(1)
	go func() {
		runBaseProtocol(peer1, rw1)
		wg.Done()
	}()

	// run second peer
	peer2 := newTestPeer()
	peer2.newPeerAddr = addrChan // feed peer suggestions into matcher
	if err := runBaseProtocol(peer2, rw2); err != ErrPipeClosed {
		t.Errorf("peer2 terminated with unexpected error: %v", err)
	}

	// terminate matcher
	close(addrChan)
	wg.Wait()
}
||||
|
||||
// TestBaseProtocolDisconnect plays the remote side of the base protocol
// by hand over a MsgPipe: it answers the handshake, acknowledges the
// getPeers request, then sends a disconnect with reason DiscQuitting,
// and verifies that runBaseProtocol surfaces exactly that reason as a
// discRequestedError.
func TestBaseProtocolDisconnect(t *testing.T) {
	peer := NewPeer(&peerId{}, nil)
	peer.ourID = &peerId{}
	peer.pubkeyHook = func(*peerAddr) error { return nil }

	rw1, rw2 := MsgPipe()
	done := make(chan struct{})
	go func() {
		// Scripted remote peer: handshake exchange first.
		if err := expectMsg(rw2, handshakeMsg); err != nil {
			t.Error(err)
		}
		err := EncodeMsg(rw2, handshakeMsg,
			baseProtocolVersion,
			"",              // client name
			[]interface{}{}, // capabilities
			0,               // listen port
			make([]byte, 64), // node id
		)
		if err != nil {
			t.Error(err)
		}
		if err := expectMsg(rw2, getPeersMsg); err != nil {
			t.Error(err)
		}
		// Request a clean disconnect; the local side should report it.
		if err := EncodeMsg(rw2, discMsg, DiscQuitting); err != nil {
			t.Error(err)
		}

		close(done)
	}()

	if err := runBaseProtocol(peer, rw1); err == nil {
		t.Errorf("base protocol returned without error")
	} else if reason, ok := err.(discRequestedError); !ok || reason != DiscQuitting {
		t.Errorf("base protocol returned wrong error: %v", err)
	}
	<-done
}
||||
|
||||
func expectMsg(r MsgReader, code uint64) error { |
||||
msg, err := r.ReadMsg() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if err := msg.Discard(); err != nil { |
||||
return err |
||||
} |
||||
if msg.Code != code { |
||||
return fmt.Errorf("wrong message code: got %d, expected %d", msg.Code, code) |
||||
} |
||||
return nil |
||||
} |
@ -1,40 +0,0 @@ |
||||
// +build none
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"fmt" |
||||
"log" |
||||
"net" |
||||
"os" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/secp256k1" |
||||
"github.com/ethereum/go-ethereum/logger" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
) |
||||
|
||||
// main starts a throwaway p2p server behind NAT-PMP and suggests one
// hard-coded seed peer, then blocks forever. This is a manual smoke-test
// program (the file carries a "+build none" tag), not production code.
func main() {
	logger.AddLogSystem(logger.NewStdLogSystem(os.Stdout, log.LstdFlags, logger.DebugLevel))

	// Throwaway identity; the private half of the key pair is discarded.
	pub, _ := secp256k1.GenerateKeyPair()
	srv := p2p.Server{
		MaxPeers:   10,
		Identity:   p2p.NewSimpleClientIdentity("test", "1.0", "", string(pub)),
		ListenAddr: ":30303",
		// 10.0.0.1 is assumed to be the local NAT-PMP gateway — adjust
		// for the network being tested against.
		NAT: p2p.PMP(net.ParseIP("10.0.0.1")),
	}
	if err := srv.Start(); err != nil {
		fmt.Println("could not start server:", err)
		os.Exit(1)
	}

	// add seed peers
	seed, err := net.ResolveTCPAddr("tcp", "poc-7.ethdev.com:30303")
	if err != nil {
		fmt.Println("couldn't resolve:", err)
		os.Exit(1)
	}
	srv.SuggestPeer(seed.IP, seed.Port, nil)

	// Block forever; the server runs in background goroutines.
	select {}
}
Loading…
Reference in new issue