mirror of https://github.com/ethereum/go-ethereum
Merge pull request #17041 from ethersphere/swarm-network-rewrite-merge
Swarm POC3 - happy solstice
commit eaff89291c
@@ -1,12 +1,32 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.

 accounts/usbwallet @karalabe
 consensus @karalabe
 core/ @karalabe @holiman
 eth/ @karalabe
 les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
 whisper/ @gballet @gluk256
@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt provides a binary merkle tree implementation
package bmt

import (
    "fmt"
    "hash"
    "io"
    "strings"
    "sync"
    "sync/atomic"
)

/*
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g keccak 256 SHA3)

It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash

The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.

Two implementations are provided:

* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
  control structure to coordinate the concurrent routines
  It implements the ChunkHash interface as well as the go standard hash.Hash interface
*/

const (
    // DefaultSegmentCount is the maximum number of segments of the underlying chunk
    DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
    // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
    // the maximum number of concurrent BMT hashing operations performed by the same hasher
    DefaultPoolSize = 8
)

// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash

// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// implements the hash.Hash interface
// reuse pool of Tree-s for amortised memory allocation and resource control
// supports order-agnostic concurrent segment writes
// as well as sequential read and write
// can not be called concurrently on more than one chunk
// can be further appended after Sum
// Reset gives back the Tree to the pool and guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
type Hasher struct {
    pool        *TreePool   // BMT resource pool
    bmt         *Tree       // prebuilt BMT resource for flowcontrol and proofs
    blocksize   int         // segment size (size of hash) also for hash.Hash
    count       int         // segment count
    size        int         // for hash.Hash same as hashsize
    cur         int         // cursor position for rightmost currently open chunk
    segment     []byte      // the rightmost open segment (not complete)
    depth       int         // index of last level
    result      chan []byte // result channel
    hash        []byte      // to record the result
    max         int32       // max segments for SegmentWriter interface
    blockLength []byte      // The block length that needes to be added in Sum
}

// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
    return &Hasher{
        pool:      p,
        depth:     depth(p.SegmentCount),
        size:      p.SegmentSize,
        blocksize: p.SegmentSize,
        count:     p.SegmentCount,
        result:    make(chan []byte),
    }
}

// Node is a reuseable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
    level, index int  // position of node for information/logging only
    initial      bool // first and last node
    root         bool // whether the node is root to a smaller BMT
    isLeft       bool // whether it is left side of the parent double segment
    unbalanced   bool // indicates if a node has only the left segment
    parent       *Node // BMT connections
    state        int32 // atomic increment impl concurrent boolean toggle
    left, right  []byte
}

// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
    return &Node{
        parent:  parent,
        level:   level,
        index:   index,
        initial: index == 0,
        isLeft:  index%2 == 0,
    }
}

// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
    lock         sync.Mutex
    c            chan *Tree
    hasher       BaseHasher
    SegmentSize  int
    SegmentCount int
    Capacity     int
    count        int
}

// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
// on GetTree it reuses free Trees or creates a new one if size is not reached
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
    return &TreePool{
        c:            make(chan *Tree, capacity),
        hasher:       hasher,
        SegmentSize:  hasher().Size(),
        SegmentCount: segmentCount,
        Capacity:     capacity,
    }
}

// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
    p.lock.Lock()
    defer p.lock.Unlock()
    for len(p.c) > n {
        <-p.c
        p.count--
    }
}

// Reserve is blocking until it returns an available Tree
// it reuses free Trees or creates a new one if size is not reached
func (p *TreePool) Reserve() *Tree {
    p.lock.Lock()
    defer p.lock.Unlock()
    var t *Tree
    if p.count == p.Capacity {
        return <-p.c
    }
    select {
    case t = <-p.c:
    default:
        t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
        p.count++
    }
    return t
}

// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// does not need locking
func (p *TreePool) Release(t *Tree) {
    p.c <- t // can never fail but...
}

// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
    leaves []*Node
}

// Draw draws the BMT (badly)
func (t *Tree) Draw(hash []byte, d int) string {
    var left, right []string
    var anc []*Node
    for i, n := range t.leaves {
        left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
        if i%2 == 0 {
            anc = append(anc, n.parent)
        }
        right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
    }
    anc = t.leaves
    var hashes [][]string
    for l := 0; len(anc) > 0; l++ {
        var nodes []*Node
        hash := []string{""}
        for i, n := range anc {
            hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
            if i%2 == 0 && n.parent != nil {
                nodes = append(nodes, n.parent)
            }
        }
        hash = append(hash, "")
        hashes = append(hashes, hash)
        anc = nodes
    }
    hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
    total := 60
    del := " "
    var rows []string
    for i := len(hashes) - 1; i >= 0; i-- {
        var textlen int
        hash := hashes[i]
        for _, s := range hash {
            textlen += len(s)
        }
        if total < textlen {
            total = textlen + len(hash)
        }
        delsize := (total - textlen) / (len(hash) - 1)
        if delsize > len(del) {
            delsize = len(del)
        }
        row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
        rows = append(rows, row)

    }
    rows = append(rows, strings.Join(left, " "))
    rows = append(rows, strings.Join(right, " "))
    return strings.Join(rows, "\n") + "\n"
}

// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be positive integer and does not need to be
// a power of two and can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
    n := NewNode(0, 0, nil)
    n.root = true
    prevlevel := []*Node{n}
    // iterate over levels and creates 2^level nodes
    level := 1
    count := 2
    for d := 1; d <= depth(segmentCount); d++ {
        nodes := make([]*Node, count)
        for i := 0; i < len(nodes); i++ {
            parent := prevlevel[i/2]
            t := NewNode(level, i, parent)
            nodes[i] = t
        }
        prevlevel = nodes
        level++
        count *= 2
    }
    // the datanode level is the nodes on the last level where
    return &Tree{
        leaves: prevlevel,
    }
}

// methods needed by hash.Hash

// Size returns the size
func (h *Hasher) Size() int {
    return h.size
}

// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
    return h.blocksize
}

// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (h *Hasher) Sum(b []byte) (r []byte) {
    t := h.bmt
    i := h.cur
    n := t.leaves[i]
    j := i
    // must run strictly before all nodes calculate
    // datanodes are guaranteed to have a parent
    if len(h.segment) > h.size && i > 0 && n.parent != nil {
        n = n.parent
    } else {
        i *= 2
    }
    d := h.finalise(n, i)
    h.writeSegment(j, h.segment, d)
    c := <-h.result
    h.releaseTree()

    // sha3(length + BMT(pure_chunk))
    if h.blockLength == nil {
        return c
    }
    res := h.pool.hasher()
    res.Reset()
    res.Write(h.blockLength)
    res.Write(c)
    return res.Sum(nil)
}

// Hasher implements the SwarmHash interface

// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (h *Hasher) Hash() []byte {
    return <-h.result
}

// Hasher implements the io.Writer interface

// Write fills the buffer to hash
// with every full segment complete launches a hasher go routine
// that shoots up the BMT
func (h *Hasher) Write(b []byte) (int, error) {
    l := len(b)
    if l <= 0 {
        return 0, nil
    }
    s := h.segment
    i := h.cur
    count := (h.count + 1) / 2
    need := h.count*h.size - h.cur*2*h.size
    size := h.size
    if need > size {
        size *= 2
    }
    if l < need {
        need = l
    }
    // calculate missing bit to complete current open segment
    rest := size - len(s)
    if need < rest {
        rest = need
    }
    s = append(s, b[:rest]...)
    need -= rest
    // read full segments and the last possibly partial segment
    for need > 0 && i < count-1 {
        // push all finished chunks we read
        h.writeSegment(i, s, h.depth)
        need -= size
        if need < 0 {
            size += need
        }
        s = b[rest : rest+size]
        rest += size
        i++
    }
    h.segment = s
    h.cur = i
    // otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
    return l, nil
}

// Hasher implements the io.ReaderFrom interface

// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads so that chunk to hash is maximum length or reader reaches EOF
// caller must Reset the hasher prior to call
func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
    bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
    buf := make([]byte, bufsize)
    var read int
    for {
        var n int
        n, err = r.Read(buf)
        read += n
        if err == io.EOF || read == len(buf) {
            hash := h.Sum(buf[:n])
            if read == len(buf) {
                err = NewEOC(hash)
            }
            break
        }
        if err != nil {
            break
        }
        n, err = h.Write(buf[:n])
        if err != nil {
            break
        }
    }
    return int64(read), err
}

// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
    h.getTree()
    h.blockLength = nil
}

// Hasher implements the SwarmHash interface

// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (h *Hasher) ResetWithLength(l []byte) {
    h.Reset()
    h.blockLength = l
}

// Release gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
    if h.bmt != nil {
        n := h.bmt.leaves[h.cur]
        for ; n != nil; n = n.parent {
            n.unbalanced = false
            if n.parent != nil {
                n.root = false
            }
        }
        h.pool.Release(h.bmt)
        h.bmt = nil

    }
    h.cur = 0
    h.segment = nil
}

func (h *Hasher) writeSegment(i int, s []byte, d int) {
    hash := h.pool.hasher()
    n := h.bmt.leaves[i]

    if len(s) > h.size && n.parent != nil {
        go func() {
            hash.Reset()
            hash.Write(s)
            s = hash.Sum(nil)

            if n.root {
                h.result <- s
                return
            }
            h.run(n.parent, hash, d, n.index, s)
        }()
        return
    }
    go h.run(n, hash, d, i*2, s)
}

func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
    isLeft := i%2 == 0
    for {
        if isLeft {
            n.left = s
        } else {
            n.right = s
        }
        if !n.unbalanced && n.toggle() {
            return
        }
        if !n.unbalanced || !isLeft || i == 0 && d == 0 {
            hash.Reset()
            hash.Write(n.left)
            hash.Write(n.right)
            s = hash.Sum(nil)

        } else {
            s = append(n.left, n.right...)
        }

        h.hash = s
        if n.root {
            h.result <- s
            return
        }

        isLeft = n.isLeft
        n = n.parent
        i++
    }
}

// getTree obtains a BMT resource by reserving one from the pool
func (h *Hasher) getTree() *Tree {
    if h.bmt != nil {
        return h.bmt
    }
    t := h.pool.Reserve()
    h.bmt = t
    return t
}

// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic addint with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *Node) toggle() bool {
    return atomic.AddInt32(&n.state, 1)%2 == 1
}

func hashstr(b []byte) string {
    end := len(b)
    if end > 4 {
        end = 4
    }
    return fmt.Sprintf("%x", b[:end])
}

func depth(n int) (d int) {
    for l := (n - 1) / 2; l > 0; l /= 2 {
        d++
    }
    return d
}

// finalise is following the zigzags on the tree belonging
// to the final datasegment
func (h *Hasher) finalise(n *Node, i int) (d int) {
    isLeft := i%2 == 0
    for {
        // when the final segment's path is going via left segments
        // the incoming data is pushed to the parent upon pulling the left
        // we do not need toggle the state since this condition is
        // detectable
        n.unbalanced = isLeft
        n.right = nil
        if n.initial {
            n.root = true
            return d
        }
        isLeft = n.isLeft
        n = n.parent
        d++
    }
}

// EOC (end of chunk) implements the error interface
type EOC struct {
    Hash []byte // read the hash of the chunk off the error
}

// Error returns the error string
func (e *EOC) Error() string {
    return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
}

// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
    return &EOC{hash}
}
@@ -1,85 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt is a simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any paralellisms and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt

import (
    "hash"
)

// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
    span    int
    section int
    cap     int
    h       hash.Hash
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
    h := hasher()
    hashsize := h.Size()
    maxsize := hashsize * count
    c := 2
    for ; c < count; c *= 2 {
    }
    if c > 2 {
        c /= 2
    }
    return &RefHasher{
        section: 2 * hashsize,
        span:    c * hashsize,
        cap:     maxsize,
        h:       h,
    }
}

// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
    if len(d) > rh.cap {
        d = d[:rh.cap]
    }

    return rh.hash(d, rh.span)
}

func (rh *RefHasher) hash(d []byte, s int) []byte {
    l := len(d)
    left := d
    var right []byte
    if l > rh.section {
        for ; s >= l; s /= 2 {
        }
        left = rh.hash(d[:s], s)
        right = d[s:]
        if l-s > rh.section/2 {
            right = rh.hash(right, s)
        }
    }
    defer rh.h.Reset()
    rh.h.Write(left)
    rh.h.Write(right)
    h := rh.h.Sum(nil)
    return h
}
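As with the concurrent hasher, a short driving sketch may help orient the reader; it mirrors the calls made in the test file below and again pretends to live in the bmt package, with the 128-segment count and the zero-filled input being illustrative assumptions rather than values from the original file.

package bmt

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto/sha3"
)

// ExampleRefHasher sketches the one-shot reference path: no pools and no
// goroutines, just a recursive hash over halved spans until sections of two
// segments remain.
func ExampleRefHasher() {
    rbmt := NewRefHasher(sha3.NewKeccak256, 128) // Keccak-256 base hash, 128 segments
    data := make([]byte, 4096)                   // zero-filled chunk, purely illustrative
    fmt.Printf("%x\n", rbmt.Hash(data))
}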
@@ -1,481 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bmt

import (
    "bytes"
    crand "crypto/rand"
    "fmt"
    "hash"
    "io"
    "math/rand"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/crypto/sha3"
)

const (
    maxproccnt = 8
)

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
    hashFunc := sha3.NewKeccak256

    sha3 := func(data ...[]byte) []byte {
        h := hashFunc()
        for _, v := range data {
            h.Write(v)
        }
        return h.Sum(nil)
    }

    // the test struct is used to specify the expected BMT hash for data
    // lengths between "from" and "to"
    type test struct {
        from     int64
        to       int64
        expected func([]byte) []byte
    }

    var tests []*test

    // all lengths in [0,64] should be:
    //
    //   sha3(data)
    //
    tests = append(tests, &test{
        from: 0,
        to:   64,
        expected: func(data []byte) []byte {
            return sha3(data)
        },
    })

    // all lengths in [65,96] should be:
    //
    //   sha3(
    //     sha3(data[:64])
    //     data[64:]
    //   )
    //
    tests = append(tests, &test{
        from: 65,
        to:   96,
        expected: func(data []byte) []byte {
            return sha3(sha3(data[:64]), data[64:])
        },
    })

    // all lengths in [97,128] should be:
    //
    //   sha3(
    //     sha3(data[:64])
    //     sha3(data[64:])
    //   )
    //
    tests = append(tests, &test{
        from: 97,
        to:   128,
        expected: func(data []byte) []byte {
            return sha3(sha3(data[:64]), sha3(data[64:]))
        },
    })

    // all lengths in [129,160] should be:
    //
    //   sha3(
    //     sha3(
    //       sha3(data[:64])
    //       sha3(data[64:128])
    //     )
    //     data[128:]
    //   )
    //
    tests = append(tests, &test{
        from: 129,
        to:   160,
        expected: func(data []byte) []byte {
            return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
        },
    })

    // all lengths in [161,192] should be:
    //
    //   sha3(
    //     sha3(
    //       sha3(data[:64])
    //       sha3(data[64:128])
    //     )
    //     sha3(data[128:])
    //   )
    //
    tests = append(tests, &test{
        from: 161,
        to:   192,
        expected: func(data []byte) []byte {
            return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
        },
    })

    // all lengths in [193,224] should be:
    //
    //   sha3(
    //     sha3(
    //       sha3(data[:64])
    //       sha3(data[64:128])
    //     )
    //     sha3(
    //       sha3(data[128:192])
    //       data[192:]
    //     )
    //   )
    //
    tests = append(tests, &test{
        from: 193,
        to:   224,
        expected: func(data []byte) []byte {
            return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
        },
    })

    // all lengths in [225,256] should be:
    //
    //   sha3(
    //     sha3(
    //       sha3(data[:64])
    //       sha3(data[64:128])
    //     )
    //     sha3(
    //       sha3(data[128:192])
    //       sha3(data[192:])
    //     )
    //   )
    //
    tests = append(tests, &test{
        from: 225,
        to:   256,
        expected: func(data []byte) []byte {
            return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
        },
    })

    // run the tests
    for _, x := range tests {
        for length := x.from; length <= x.to; length++ {
            t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
                data := make([]byte, length)
                if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
                    t.Fatal(err)
                }
                expected := x.expected(data)
                actual := NewRefHasher(hashFunc, 128).Hash(data)
                if !bytes.Equal(actual, expected) {
                    t.Fatalf("expected %x, got %x", expected, actual)
                }
            })
        }
    }
}

func testDataReader(l int) (r io.Reader) {
    return io.LimitReader(crand.Reader, int64(l))
}

func TestHasherCorrectness(t *testing.T) {
    err := testHasher(testBaseHasher)
    if err != nil {
        t.Fatal(err)
    }
}

func testHasher(f func(BaseHasher, []byte, int, int) error) error {
    tdata := testDataReader(4128)
    data := make([]byte, 4128)
    tdata.Read(data)
    hasher := sha3.NewKeccak256
    size := hasher().Size()
    counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}

    var err error
    for _, count := range counts {
        max := count * size
        incr := 1
        for n := 0; n <= max+incr; n += incr {
            err = f(hasher, data, n, count)
            if err != nil {
                return err
            }
        }
    }
    return nil
}

func TestHasherReuseWithoutRelease(t *testing.T) {
    testHasherReuse(1, t)
}

func TestHasherReuseWithRelease(t *testing.T) {
    testHasherReuse(maxproccnt, t)
}

func testHasherReuse(i int, t *testing.T) {
    hasher := sha3.NewKeccak256
    pool := NewTreePool(hasher, 128, i)
    defer pool.Drain(0)
    bmt := New(pool)

    for i := 0; i < 500; i++ {
        n := rand.Intn(4096)
        tdata := testDataReader(n)
        data := make([]byte, n)
        tdata.Read(data)

        err := testHasherCorrectness(bmt, hasher, data, n, 128)
        if err != nil {
            t.Fatal(err)
        }
    }
}

func TestHasherConcurrency(t *testing.T) {
    hasher := sha3.NewKeccak256
    pool := NewTreePool(hasher, 128, maxproccnt)
    defer pool.Drain(0)
    wg := sync.WaitGroup{}
    cycles := 100
    wg.Add(maxproccnt * cycles)
    errc := make(chan error)

    for p := 0; p < maxproccnt; p++ {
        for i := 0; i < cycles; i++ {
            go func() {
                bmt := New(pool)
                n := rand.Intn(4096)
                tdata := testDataReader(n)
                data := make([]byte, n)
                tdata.Read(data)
                err := testHasherCorrectness(bmt, hasher, data, n, 128)
                wg.Done()
                if err != nil {
                    errc <- err
                }
            }()
        }
    }
    go func() {
        wg.Wait()
        close(errc)
    }()
    var err error
    select {
    case <-time.NewTimer(5 * time.Second).C:
        err = fmt.Errorf("timed out")
    case err = <-errc:
    }
    if err != nil {
        t.Fatal(err)
    }
}

func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
    pool := NewTreePool(hasher, count, 1)
    defer pool.Drain(0)
    bmt := New(pool)
    return testHasherCorrectness(bmt, hasher, d, n, count)
}

func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
    data := d[:n]
    rbmt := NewRefHasher(hasher, count)
    exp := rbmt.Hash(data)
    timeout := time.NewTimer(time.Second)
    c := make(chan error)

    go func() {
        bmt.Reset()
        bmt.Write(data)
        got := bmt.Sum(nil)
        if !bytes.Equal(got, exp) {
            c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
        }
        close(c)
    }()
    select {
    case <-timeout.C:
        err = fmt.Errorf("BMT hash calculation timed out")
    case err = <-c:
    }
    return err
}

func BenchmarkSHA3_4k(t *testing.B)   { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B)   { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B)   { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }

func BenchmarkBMTBaseline_4k(t *testing.B)   { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B)   { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B)   { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }

func BenchmarkRefHasher_4k(t *testing.B)   { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B)   { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B)   { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }

func BenchmarkHasher_4k(t *testing.B)   { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B)   { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B)   { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }

func BenchmarkHasherNoReuse_4k(t *testing.B)   { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B)   { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B)   { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }

func BenchmarkHasherReuse_4k(t *testing.B)   { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B)   { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B)   { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }

// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
// doing it on n maxproccnt each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
    tdata := testDataReader(64)
    data := make([]byte, 64)
    tdata.Read(data)
    hasher := sha3.NewKeccak256

    t.ReportAllocs()
    t.ResetTimer()
    for i := 0; i < t.N; i++ {
        count := int32((n-1)/hasher().Size() + 1)
        wg := sync.WaitGroup{}
        wg.Add(maxproccnt)
        var i int32
        for j := 0; j < maxproccnt; j++ {
            go func() {
                defer wg.Done()
                h := hasher()
                for atomic.AddInt32(&i, 1) < count {
                    h.Reset()
                    h.Write(data)
                    h.Sum(nil)
                }
            }()
        }
        wg.Wait()
    }
}

func benchmarkHasher(n int, t *testing.B) {
    tdata := testDataReader(n)
    data := make([]byte, n)
    tdata.Read(data)

    size := 1
    hasher := sha3.NewKeccak256
    segmentCount := 128
    pool := NewTreePool(hasher, segmentCount, size)
    bmt := New(pool)

    t.ReportAllocs()
    t.ResetTimer()
    for i := 0; i < t.N; i++ {
        bmt.Reset()
        bmt.Write(data)
        bmt.Sum(nil)
    }
}

func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
    tdata := testDataReader(n)
    data := make([]byte, n)
    tdata.Read(data)

    hasher := sha3.NewKeccak256
    segmentCount := 128
    pool := NewTreePool(hasher, segmentCount, poolsize)
    cycles := 200

    t.ReportAllocs()
    t.ResetTimer()
    for i := 0; i < t.N; i++ {
        wg := sync.WaitGroup{}
        wg.Add(cycles)
        for j := 0; j < cycles; j++ {
            bmt := New(pool)
            go func() {
                defer wg.Done()
                bmt.Reset()
                bmt.Write(data)
                bmt.Sum(nil)
            }()
        }
        wg.Wait()
    }
}

func benchmarkSHA3(n int, t *testing.B) {
    data := make([]byte, n)
    tdata := testDataReader(n)
    tdata.Read(data)
    hasher := sha3.NewKeccak256
    h := hasher()

    t.ReportAllocs()
    t.ResetTimer()
    for i := 0; i < t.N; i++ {
        h.Reset()
        h.Write(data)
        h.Sum(nil)
    }
}

func benchmarkRefHasher(n int, t *testing.B) {
    data := make([]byte, n)
    tdata := testDataReader(n)
    tdata.Read(data)
    hasher := sha3.NewKeccak256
    rbmt := NewRefHasher(hasher, 128)

    t.ReportAllocs()
    t.ResetTimer()
    for i := 0; i < t.N; i++ {
        rbmt.Hash(data)
    }
}
@@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/swarm/api"
    swarm "github.com/ethereum/go-ethereum/swarm/api/client"
    "gopkg.in/urfave/cli.v1"
)

func download(ctx *cli.Context) {
    log.Debug("downloading content using swarm down")
    args := ctx.Args()
    dest := "."

    switch len(args) {
    case 0:
        utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
    case 1:
        log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
    default:
        log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
        if absDest, err := filepath.Abs(args[1]); err == nil {
            dest = absDest
        } else {
            utils.Fatalf("could not get download path: %v", err)
        }
    }

    var (
        bzzapi      = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
        isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
        client      = swarm.NewClient(bzzapi)
    )

    if fi, err := os.Stat(dest); err == nil {
        if isRecursive && !fi.Mode().IsDir() {
            utils.Fatalf("destination path is not a directory!")
        }
    } else {
        if !os.IsNotExist(err) {
            utils.Fatalf("could not stat path: %v", err)
        }
    }

    uri, err := api.Parse(args[0])
    if err != nil {
        utils.Fatalf("could not parse uri argument: %v", err)
    }

    // assume behaviour according to --recursive switch
    if isRecursive {
        if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
            utils.Fatalf("encountered an error while downloading directory: %v", err)
        }
    } else {
        // we are downloading a file
        log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))

        err := client.DownloadFile(uri.Addr, uri.Path, dest)
        if err != nil {
            utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
        }
    }
}
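The command above is a thin wrapper around the HTTP client package it imports: it resolves the destination path, then dispatches to DownloadDirectory or DownloadFile depending on the --recursive switch. A minimal standalone sketch of those same two calls follows; the gateway URL, the manifest-hash placeholder and the destination paths are illustrative assumptions, not values from the original file.

package main

import (
    "log"

    swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
    // point the client at a swarm HTTP gateway (placeholder URL)
    client := swarm.NewClient("http://localhost:8500")

    // single file: manifest address, path within the manifest, local destination
    if err := client.DownloadFile("<manifest-hash>", "index.html", "./index.html"); err != nil {
        log.Fatal(err)
    }

    // whole manifest tree, as the --recursive switch does
    if err := client.DownloadDirectory("<manifest-hash>", "", "./site"); err != nil {
        log.Fatal(err)
    }
}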
@@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "bytes"
    "crypto/md5"
    "crypto/rand"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "strings"
    "testing"

    "github.com/ethereum/go-ethereum/swarm"
)

// TestCLISwarmExportImport performs the following test:
// 1. runs a swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
    cluster := newTestCluster(t, 1)

    // generate random 10mb file
    f, cleanup := generateRandomFile(t, 10000000)
    defer cleanup()

    // upload the file with 'swarm up' and expect a hash
    up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
    _, matches := up.ExpectRegexp(`[a-f\d]{64}`)
    up.ExpectExit()
    hash := matches[0]

    var info swarm.Info
    if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
        t.Fatal(err)
    }

    cluster.Stop()
    defer cluster.Cleanup()

    // generate an export.tar
    exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
    exportCmd.ExpectExit()

    // start second cluster
    cluster2 := newTestCluster(t, 1)

    var info2 swarm.Info
    if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
        t.Fatal(err)
    }

    // stop second cluster, so that we close LevelDB
    cluster2.Stop()
    defer cluster2.Cleanup()

    // import the export.tar
    importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
    importCmd.ExpectExit()

    // spin second cluster back up
    cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))

    // try to fetch imported file
    res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
    if err != nil {
        t.Fatal(err)
    }

    if res.StatusCode != 200 {
        t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
    }

    // compare downloaded file with the generated random file
    mustEqualFiles(t, f, res.Body)
}

func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
    h := md5.New()
    upLen, err := io.Copy(h, up)
    if err != nil {
        t.Fatal(err)
    }
    upHash := h.Sum(nil)
    h.Reset()
    downLen, err := io.Copy(h, down)
    if err != nil {
        t.Fatal(err)
    }
    downHash := h.Sum(nil)

    if !bytes.Equal(upHash, downHash) || upLen != downLen {
        t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen)
    }
}

func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
    // create a tmp file
    tmp, err := ioutil.TempFile("", "swarm-test")
    if err != nil {
        t.Fatal(err)
    }

    // callback for tmp file cleanup
    teardown = func() {
        tmp.Close()
        os.Remove(tmp.Name())
    }

    // write 10mb random data to file
    buf := make([]byte, 10000000)
    _, err = rand.Read(buf)
    if err != nil {
        t.Fatal(err)
    }
    ioutil.WriteFile(tmp.Name(), buf, 0755)

    return tmp, teardown
}
@@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
    "context"
    "fmt"
    "path/filepath"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/cmd/utils"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/rpc"
    "github.com/ethereum/go-ethereum/swarm/fuse"
    "gopkg.in/urfave/cli.v1"
)

func mount(cliContext *cli.Context) {
    args := cliContext.Args()
    if len(args) < 2 {
        utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
    }

    client, err := dialRPC(cliContext)
    if err != nil {
        utils.Fatalf("had an error dialing the RPC endpoint: %v", err)
    }
    defer client.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    mf := &fuse.MountInfo{}
    mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
    if err != nil {
        utils.Fatalf("error expanding path for mount point: %v", err)
    }
    err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
    if err != nil {
        utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
    }
}

func unmount(cliContext *cli.Context) {
    args := cliContext.Args()

    if len(args) < 1 {
        utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
    }
    client, err := dialRPC(cliContext)
    if err != nil {
        utils.Fatalf("had an error dialing the RPC endpoint: %v", err)
    }
    defer client.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    mf := fuse.MountInfo{}
    err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
    if err != nil {
        utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
    }
    fmt.Printf("%s\n", mf.LatestManifest) // print the latest manifest hash for user reference
}

func listMounts(cliContext *cli.Context) {
    client, err := dialRPC(cliContext)
    if err != nil {
        utils.Fatalf("had an error dialing the RPC endpoint: %v", err)
    }
    defer client.Close()

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    mf := []fuse.MountInfo{}
    err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
    if err != nil {
        utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err)
    }
    if len(mf) == 0 {
        fmt.Print("Could not find any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
    } else {
        fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
        for i, mountInfo := range mf {
            fmt.Printf("%d:\n", i)
            fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
            fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
            fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
        }
    }
}

func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
    var endpoint string

    if ctx.IsSet(utils.IPCPathFlag.Name) {
        endpoint = ctx.String(utils.IPCPathFlag.Name)
    } else {
        utils.Fatalf("swarm ipc endpoint not specified")
    }

    if endpoint == "" {
        endpoint = node.DefaultIPCEndpoint(clientIdentifier)
    } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
        // Backwards compatibility with geth < 1.5 which required
        // these prefixes.
        endpoint = endpoint[4:]
    }
    return rpc.Dial(endpoint)
}
@ -0,0 +1,234 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"path/filepath" |
||||
"strings" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
colorable "github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
func init() { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) |
||||
} |
||||
|
||||
type testFile struct { |
||||
filePath string |
||||
content string |
||||
} |
||||
|
||||
// TestCLISwarmFs is a high-level test of swarmfs
|
||||
func TestCLISwarmFs(t *testing.T) { |
||||
cluster := newTestCluster(t, 3) |
||||
defer cluster.Shutdown() |
||||
|
||||
// create a tmp dir
|
||||
mountPoint, err := ioutil.TempDir("", "swarm-test") |
||||
log.Debug("swarmfs cli test", "1st mount", mountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(mountPoint) |
||||
|
||||
handlingNode := cluster.Nodes[0] |
||||
mhash := doUploadEmptyDir(t, handlingNode) |
||||
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
mount := runSwarm(t, []string{ |
||||
"fs", |
||||
"mount", |
||||
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
mhash, |
||||
mountPoint, |
||||
}...) |
||||
mount.ExpectExit() |
||||
|
||||
filesToAssert := []*testFile{} |
||||
|
||||
dirPath, err := createDirInDir(mountPoint, "testSubDir") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir") |
||||
|
||||
dummyContent := "somerandomtestcontentthatshouldbeasserted" |
||||
dirs := []string{ |
||||
mountPoint, |
||||
dirPath, |
||||
dirPath2, |
||||
} |
||||
files := []string{"f1.tmp", "f2.tmp"} |
||||
for _, d := range dirs { |
||||
for _, entry := range files { |
||||
tFile, err := createTestFileInPath(d, entry, dummyContent) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
filesToAssert = append(filesToAssert, tFile) |
||||
} |
||||
} |
||||
if len(filesToAssert) != len(dirs)*len(files) { |
||||
t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert)) |
||||
} |
||||
hashRegexp := `[a-f\d]{64}` |
||||
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
unmount := runSwarm(t, []string{ |
||||
"fs", |
||||
"unmount", |
||||
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
mountPoint, |
||||
}...) |
||||
_, matches := unmount.ExpectRegexp(hashRegexp) |
||||
unmount.ExpectExit() |
||||
|
||||
hash := matches[0] |
||||
if hash == mhash { |
||||
t.Fatal("this should not be equal") |
||||
} |
||||
log.Debug("swarmfs cli test: asserting no files in mount point") |
||||
|
||||
//check that there's nothing in the mount folder
|
||||
filesInDir, err := ioutil.ReadDir(mountPoint) |
||||
if err != nil { |
||||
t.Fatalf("had an error reading the directory: %v", err) |
||||
} |
||||
|
||||
if len(filesInDir) != 0 { |
||||
t.Fatal("there shouldn't be anything here") |
||||
} |
||||
|
||||
secondMountPoint, err := ioutil.TempDir("", "swarm-test") |
||||
log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(secondMountPoint) |
||||
|
||||
log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
//remount, check files
|
||||
newMount := runSwarm(t, []string{ |
||||
"fs", |
||||
"mount", |
||||
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
hash, // the latest hash
|
||||
secondMountPoint, |
||||
}...) |
||||
|
||||
newMount.ExpectExit() |
||||
time.Sleep(1 * time.Second) |
||||
|
||||
filesInDir, err = ioutil.ReadDir(secondMountPoint) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
if len(filesInDir) == 0 { |
||||
t.Fatal("there should be something here") |
||||
} |
||||
|
||||
log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount") |
||||
|
||||
for _, file := range filesToAssert { |
||||
file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1) |
||||
fileBytes, err := ioutil.ReadFile(file.filePath) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) { |
||||
t.Fatal("this should be equal") |
||||
} |
||||
} |
||||
|
||||
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) |
||||
|
||||
unmountSec := runSwarm(t, []string{ |
||||
"fs", |
||||
"unmount", |
||||
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), |
||||
secondMountPoint, |
||||
}...) |
||||
|
||||
_, matches = unmountSec.ExpectRegexp(hashRegexp) |
||||
unmountSec.ExpectExit() |
||||
|
||||
if matches[0] != hash { |
||||
t.Fatal("these should be equal - no changes made") |
||||
} |
||||
} |
||||
|
||||
func doUploadEmptyDir(t *testing.T, node *testNode) string { |
||||
// create a tmp dir
|
||||
tmpDir, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(tmpDir) |
||||
|
||||
hashRegexp := `[a-f\d]{64}` |
||||
|
||||
flags := []string{ |
||||
"--bzzapi", node.URL, |
||||
"--recursive", |
||||
"up", |
||||
tmpDir} |
||||
|
||||
log.Info("swarmfs cli test: uploading dir with 'swarm up'") |
||||
up := runSwarm(t, flags...) |
||||
_, matches := up.ExpectRegexp(hashRegexp) |
||||
up.ExpectExit() |
||||
hash := matches[0] |
||||
log.Info("swarmfs cli test: dir uploaded", "hash", hash) |
||||
return hash |
||||
} |
||||
|
||||
func createDirInDir(createInDir string, dirToCreate string) (string, error) { |
||||
fullpath := filepath.Join(createInDir, dirToCreate) |
||||
err := os.MkdirAll(fullpath, 0777) |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
return fullpath, nil |
||||
} |
||||
|
||||
func createTestFileInPath(dir, filename, content string) (*testFile, error) {
	tFile := &testFile{}
	filePath := filepath.Join(dir, filename)
	file, err := os.Create(filePath)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	tFile.content = content
	tFile.filePath = filePath

	if _, err := io.WriteString(file, content); err != nil {
		return nil, err
	}

	return tFile, nil
}
@ -0,0 +1,101 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"os" |
||||
"sort" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
colorable "github.com/mattn/go-colorable" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
var ( |
||||
endpoints []string |
||||
includeLocalhost bool |
||||
cluster string |
||||
scheme string |
||||
filesize int |
||||
from int |
||||
to int |
||||
) |
||||
|
||||
func main() { |
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) |
||||
|
||||
app := cli.NewApp() |
||||
app.Name = "smoke-test" |
||||
app.Usage = "" |
||||
|
||||
app.Flags = []cli.Flag{ |
||||
cli.StringFlag{ |
||||
Name: "cluster-endpoint", |
||||
Value: "testing", |
||||
Usage: "cluster to point to (open, or testing)", |
||||
Destination: &cluster, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "cluster-from", |
||||
Value: 8501, |
||||
Usage: "swarm node (from)", |
||||
Destination: &from, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "cluster-to", |
||||
Value: 8512, |
||||
Usage: "swarm node (to)", |
||||
Destination: &to, |
||||
}, |
||||
cli.StringFlag{ |
||||
Name: "cluster-scheme", |
||||
Value: "http", |
||||
Usage: "http or https", |
||||
Destination: &scheme, |
||||
}, |
||||
cli.BoolFlag{ |
||||
Name: "include-localhost", |
||||
Usage: "whether to include localhost:8500 as an endpoint", |
||||
Destination: &includeLocalhost, |
||||
}, |
||||
cli.IntFlag{ |
||||
Name: "filesize", |
||||
Value: 1, |
||||
Usage: "file size for generated random file in MB", |
||||
Destination: &filesize, |
||||
}, |
||||
} |
||||
|
||||
app.Commands = []cli.Command{ |
||||
{ |
||||
Name: "upload_and_sync", |
||||
Aliases: []string{"c"}, |
||||
Usage: "upload and sync", |
||||
Action: cliUploadAndSync, |
||||
}, |
||||
} |
||||
|
||||
sort.Sort(cli.FlagsByName(app.Flags)) |
||||
sort.Sort(cli.CommandsByName(app.Commands)) |
||||
|
||||
err := app.Run(os.Args) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
} |
||||
} |
@ -0,0 +1,184 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of go-ethereum.
|
||||
//
|
||||
// go-ethereum is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// go-ethereum is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main |
||||
|
||||
import ( |
||||
"bytes" |
||||
"crypto/md5" |
||||
"crypto/rand" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"os" |
||||
"os/exec" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/pborman/uuid" |
||||
|
||||
cli "gopkg.in/urfave/cli.v1" |
||||
) |
||||
|
||||
func generateEndpoints(scheme string, cluster string, from int, to int) { |
||||
for port := from; port <= to; port++ { |
||||
endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster)) |
||||
} |
||||
|
||||
if includeLocalhost { |
||||
endpoints = append(endpoints, "http://localhost:8500") |
||||
} |
||||
} |
||||
|
||||
func cliUploadAndSync(c *cli.Context) error { |
||||
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now()) |
||||
|
||||
generateEndpoints(scheme, cluster, from, to) |
||||
|
||||
log.Info("uploading to " + endpoints[0] + " and syncing") |
||||
|
||||
f, cleanup := generateRandomFile(filesize * 1000000) |
||||
defer cleanup() |
||||
|
||||
hash, err := upload(f, endpoints[0]) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
fhash, err := digest(f) |
||||
if err != nil { |
||||
log.Error(err.Error()) |
||||
return err |
||||
} |
||||
|
||||
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) |
||||
|
||||
if filesize < 10 { |
||||
time.Sleep(15 * time.Second) |
||||
} else { |
||||
time.Sleep(2 * time.Duration(filesize) * time.Second) |
||||
} |
||||
|
||||
wg := sync.WaitGroup{} |
||||
for _, endpoint := range endpoints { |
||||
endpoint := endpoint |
||||
ruid := uuid.New()[:8] |
||||
wg.Add(1) |
||||
go func(endpoint string, ruid string) { |
||||
for { |
||||
err := fetch(hash, endpoint, fhash, ruid) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
|
||||
wg.Done() |
||||
return |
||||
} |
||||
}(endpoint, ruid) |
||||
} |
||||
wg.Wait() |
||||
log.Info("all endpoints synced random file successfully") |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// fetch downloads the requested `hash` from the `endpoint` and compares its digest with the `original` file digest
|
||||
func fetch(hash string, endpoint string, original []byte, ruid string) error { |
||||
log.Trace("sleeping", "ruid", ruid) |
||||
time.Sleep(1 * time.Second) |
||||
|
||||
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) |
||||
res, err := http.Get(endpoint + "/bzz:/" + hash + "/") |
||||
if err != nil { |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength) |
||||
|
||||
if res.StatusCode != 200 { |
||||
err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode) |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
defer res.Body.Close() |
||||
|
||||
rdigest, err := digest(res.Body) |
||||
if err != nil { |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
if !bytes.Equal(rdigest, original) { |
||||
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) |
||||
log.Warn(err.Error(), "ruid", ruid) |
||||
return err |
||||
} |
||||
|
||||
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// upload uploads the file `f` to `endpoint` via the `swarm up` command
|
||||
func upload(f *os.File, endpoint string) (string, error) { |
||||
var out bytes.Buffer |
||||
cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name()) |
||||
cmd.Stdout = &out |
||||
err := cmd.Run() |
||||
if err != nil { |
||||
return "", err |
||||
} |
||||
hash := strings.TrimRight(out.String(), "\r\n") |
||||
return hash, nil |
||||
} |
||||
|
||||
func digest(r io.Reader) ([]byte, error) { |
||||
h := md5.New() |
||||
_, err := io.Copy(h, r) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return h.Sum(nil), nil |
||||
} |
||||
|
||||
// generateRandomFile creates a temporary file with the requested byte size
|
||||
func generateRandomFile(size int) (f *os.File, teardown func()) { |
||||
// create a tmp file
|
||||
tmp, err := ioutil.TempFile("", "swarm-test") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
// callback for tmp file cleanup
|
||||
teardown = func() { |
||||
tmp.Close() |
||||
os.Remove(tmp.Name()) |
||||
} |
||||
|
||||
buf := make([]byte, size) |
||||
_, err = rand.Read(buf) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
	panic(err)
}
||||
|
||||
return tmp, teardown |
||||
} |
@ -0,0 +1,35 @@ |
||||
# Core team members |
||||
|
||||
Viktor Trón - @zelig |
||||
Louis Holbrook - @nolash |
||||
Lewis Marshall - @lmars |
||||
Anton Evangelatov - @nonsense |
||||
Janoš Guljaš - @janos |
||||
Balint Gabor - @gbalint |
||||
Elad Nachmias - @justelad |
||||
Daniel A. Nagy - @nagydani |
||||
Aron Fischer - @homotopycolimit |
||||
Fabio Barone - @holisticode |
||||
Zahoor Mohamed - @jmozah |
||||
Zsolt Felföldi - @zsfelfoldi |
||||
|
||||
# External contributors |
||||
|
||||
Kiel Barry |
||||
Gary Rong |
||||
Jared Wasinger |
||||
Leon Stanko |
||||
Javier Peletier [epiclabs.io] |
||||
Bartek Borkowski [tungsten-labs.com] |
||||
Shane Howley [mainframe.com] |
||||
Doug Leonard [mainframe.com] |
||||
Ivan Daniluk [status.im] |
||||
Felix Lange [EF] |
||||
Martin Holst Swende [EF] |
||||
Guillaume Ballet [EF] |
||||
ligi [EF] |
||||
Christopher Dro [blick-labs.com] |
||||
Sergii Bomko [ledgerleopard.com] |
||||
Domino Valdano |
||||
Rafael Matias |
||||
Coogan Brennan |
@ -0,0 +1,26 @@ |
||||
# Ownership by go packages |
||||
|
||||
swarm |
||||
├── api ─────────────────── ethersphere |
||||
├── bmt ─────────────────── @zelig |
||||
├── dev ─────────────────── @lmars |
||||
├── fuse ────────────────── @jmozah, @holisticode |
||||
├── grafana_dashboards ──── @nonsense |
||||
├── metrics ─────────────── @nonsense, @holisticode |
||||
├── multihash ───────────── @nolash |
||||
├── network ─────────────── ethersphere |
||||
│ ├── bitvector ───────── @zelig, @janos, @gbalint |
||||
│ ├── priorityqueue ───── @zelig, @janos, @gbalint |
||||
│ ├── simulations ─────── @zelig |
||||
│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad |
||||
│ ├── intervals ───── @janos |
||||
│ └── testing ─────── @zelig |
||||
├── pot ─────────────────── @zelig |
||||
├── pss ─────────────────── @nolash, @zelig, @nonsense |
||||
├── services ────────────── @zelig |
||||
├── state ───────────────── @justelad |
||||
├── storage ─────────────── ethersphere |
||||
│ ├── encryption ──────── @gbalint, @zelig, @nagydani |
||||
│ ├── mock ────────────── @janos |
||||
│ └── mru ─────────────── @nolash |
||||
└── testutil ────────────── @lmars |
@ -0,0 +1,543 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt provides a binary merkle tree implementation
|
||||
package bmt |
||||
|
||||
import ( |
||||
"fmt" |
||||
"hash" |
||||
"strings" |
||||
"sync" |
||||
"sync/atomic" |
||||
) |
||||
|
||||
/* |
||||
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. keccak 256 SHA3).
Chunks with data shorter than the fixed size are hashed as if they had zero padding.
||||
|
||||
BMT hash is used as the chunk hash function in swarm which in turn is the basis for the |
||||
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
|
||||
|
||||
The BMT is optimal for providing compact inclusion proofs, i.e. proving that a
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed to the size of the base hash (called the resolution
of the BMT hash); using Keccak256 SHA3 this is 32 bytes, the EVM word size, which optimizes for on-chain BMT verification
as well as being the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
||||
|
||||
Two implementations are provided: |
||||
|
||||
* RefHasher is optimized for code simplicity and meant as a reference implementation |
||||
that is simple to understand |
||||
* Hasher is optimized for speed taking advantage of concurrency with minimalistic |
||||
control structure to coordinate the concurrent routines |
||||
It implements the following interfaces |
||||
* standard golang hash.Hash |
||||
* SwarmHash |
||||
* io.Writer |
||||
* TODO: SegmentWriter |
||||
*/ |
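
// exampleHashChunk is an illustrative sketch added for clarity and is not part
// of the original file. It shows how a caller is expected to drive the Hasher:
// obtain a pool, reset with the 8-byte span, write the chunk payload and read
// the digest. It only uses names defined in this package.
func exampleHashChunk(base BaseHasherFunc, span, data []byte) []byte {
	pool := NewTreePool(base, SegmentCount, PoolSize) // pool of reusable BMT trees
	defer pool.Drain(0)                               // free the trees when done
	h := New(pool)
	h.ResetWithLength(span) // span is the binary encoding of the data length
	h.Write(data)           // fills segments, hashing sections concurrently
	return h.Sum(nil)       // returns sha3(span + BMT(chunk)) when span != nil
}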
||||
|
||||
const ( |
||||
// SegmentCount is the maximum number of segments of the underlying chunk
|
||||
// Should be equal to max-chunk-data-size / hash-size
|
||||
SegmentCount = 128 |
||||
// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
PoolSize = 8 |
||||
) |
||||
|
||||
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
// implemented by Keccak256 SHA3 sha3.NewKeccak256
|
||||
type BaseHasherFunc func() hash.Hash |
||||
|
||||
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// - implements the hash.Hash interface
|
||||
// - reuses a pool of trees for amortised memory allocation and resource control
|
||||
// - supports order-agnostic concurrent segment writes (TODO:)
|
||||
// as well as sequential read and write
|
||||
// - the same hasher instance must not be called concurrently on more than one chunk
|
||||
// - the same hasher instance is synchronously reusable
|
||||
// - Sum gives back the tree to the pool and is guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
// - generates and verifies segment inclusion proofs (TODO:)
|
||||
type Hasher struct { |
||||
pool *TreePool // BMT resource pool
|
||||
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
} |
||||
|
||||
// New creates a reusable Hasher
|
||||
// implements the hash.Hash interface
|
||||
// pulls a new tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher { |
||||
return &Hasher{ |
||||
pool: p, |
||||
} |
||||
} |
||||
|
||||
// TreePool provides a pool of trees used as resources by Hasher
|
||||
// a tree popped from the pool is guaranteed to have clean state
|
||||
// for hashing a new chunk
|
||||
type TreePool struct { |
||||
lock sync.Mutex |
||||
c chan *tree // the channel to obtain a resource from the pool
|
||||
hasher BaseHasherFunc // base hasher to use for the BMT levels
|
||||
SegmentSize int // size of leaf segments, stipulated to be = hash size
|
||||
SegmentCount int // the number of segments on the base level of the BMT
|
||||
Capacity int // pool capacity, controls concurrency
|
||||
Depth int // depth of the bmt trees = int(log2(segmentCount))+1
|
||||
Datalength int // the total length of the data (count * size)
|
||||
count int // current count of (ever) allocated resources
|
||||
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
|
||||
} |
||||
|
||||
// NewTreePool creates a tree pool with hasher, segment size, segment count and capacity
|
||||
// on Hasher.getTree it reuses free trees or creates a new one if capacity is not reached
|
||||
func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool { |
||||
// initialises the zerohashes lookup table
|
||||
depth := calculateDepthFor(segmentCount) |
||||
segmentSize := hasher().Size() |
||||
zerohashes := make([][]byte, depth) |
||||
zeros := make([]byte, segmentSize) |
||||
zerohashes[0] = zeros |
||||
h := hasher() |
||||
for i := 1; i < depth; i++ { |
||||
h.Reset() |
||||
h.Write(zeros) |
||||
h.Write(zeros) |
||||
zeros = h.Sum(nil) |
||||
zerohashes[i] = zeros |
||||
} |
||||
return &TreePool{ |
||||
c: make(chan *tree, capacity), |
||||
hasher: hasher, |
||||
SegmentSize: segmentSize, |
||||
SegmentCount: segmentCount, |
||||
Capacity: capacity, |
||||
Datalength: segmentCount * segmentSize, |
||||
Depth: depth, |
||||
zerohashes: zerohashes, |
||||
} |
||||
} |
||||
|
||||
// Drain drains the pool until it has no more than n resources
|
||||
func (p *TreePool) Drain(n int) { |
||||
p.lock.Lock() |
||||
defer p.lock.Unlock() |
||||
for len(p.c) > n { |
||||
<-p.c |
||||
p.count-- |
||||
} |
||||
} |
||||
|
||||
// reserve blocks until it returns an available tree
// it reuses free trees or creates a new one if capacity is not reached
|
||||
// TODO: should use a context here
|
||||
func (p *TreePool) reserve() *tree { |
||||
p.lock.Lock() |
||||
defer p.lock.Unlock() |
||||
var t *tree |
||||
if p.count == p.Capacity { |
||||
return <-p.c |
||||
} |
||||
select { |
||||
case t = <-p.c: |
||||
default: |
||||
t = newTree(p.SegmentSize, p.Depth) |
||||
p.count++ |
||||
} |
||||
return t |
||||
} |
||||
|
||||
// release gives back a tree to the pool.
|
||||
// this tree is guaranteed to be in reusable state
|
||||
func (p *TreePool) release(t *tree) { |
||||
p.c <- t // can never fail ...
|
||||
} |
||||
|
||||
// tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to obtain a tree for each chunk hash
|
||||
// the tree is 'locked' while not in the pool
|
||||
type tree struct { |
||||
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
|
||||
cur int // index of rightmost currently open segment
|
||||
offset int // offset (cursor position) within currently open segment
|
||||
segment []byte // the rightmost open segment (not complete)
|
||||
section []byte // the rightmost open section (double segment)
|
||||
depth int // number of levels
|
||||
result chan []byte // result channel
|
||||
hash []byte // to record the result
|
||||
span []byte // The span of the data subsumed under the chunk
|
||||
} |
||||
|
||||
// node is a reusable segment hasher representing a node in a BMT
|
||||
type node struct { |
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
parent *node // pointer to parent node in the BMT
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte // this is where the content segment is set
|
||||
} |
||||
|
||||
// newNode constructs a segment hasher node in the BMT (used by newTree)
|
||||
func newNode(index int, parent *node) *node { |
||||
return &node{ |
||||
parent: parent, |
||||
isLeft: index%2 == 0, |
||||
} |
||||
} |
||||
|
||||
// draw draws the BMT (badly)
|
||||
func (t *tree) draw(hash []byte) string { |
||||
var left, right []string |
||||
var anc []*node |
||||
for i, n := range t.leaves { |
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left))) |
||||
if i%2 == 0 { |
||||
anc = append(anc, n.parent) |
||||
} |
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right))) |
||||
} |
||||
anc = t.leaves |
||||
var hashes [][]string |
||||
for l := 0; len(anc) > 0; l++ { |
||||
var nodes []*node |
||||
hash := []string{""} |
||||
for i, n := range anc { |
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right))) |
||||
if i%2 == 0 && n.parent != nil { |
||||
nodes = append(nodes, n.parent) |
||||
} |
||||
} |
||||
hash = append(hash, "") |
||||
hashes = append(hashes, hash) |
||||
anc = nodes |
||||
} |
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""}) |
||||
total := 60 |
||||
del := " " |
||||
var rows []string |
||||
for i := len(hashes) - 1; i >= 0; i-- { |
||||
var textlen int |
||||
hash := hashes[i] |
||||
for _, s := range hash { |
||||
textlen += len(s) |
||||
} |
||||
if total < textlen { |
||||
total = textlen + len(hash) |
||||
} |
||||
delsize := (total - textlen) / (len(hash) - 1) |
||||
if delsize > len(del) { |
||||
delsize = len(del) |
||||
} |
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize])) |
||||
rows = append(rows, row) |
||||
|
||||
} |
||||
rows = append(rows, strings.Join(left, " ")) |
||||
rows = append(rows, strings.Join(right, " ")) |
||||
return strings.Join(rows, "\n") + "\n" |
||||
} |
||||
|
||||
// newTree initialises a tree by building up the nodes of a BMT
|
||||
// - segment size is stipulated to be the size of the hash
|
||||
func newTree(segmentSize, depth int) *tree { |
||||
n := newNode(0, nil) |
||||
prevlevel := []*node{n} |
||||
// iterate over levels and creates 2^(depth-level) nodes
|
||||
count := 2 |
||||
for level := depth - 2; level >= 0; level-- { |
||||
nodes := make([]*node, count) |
||||
for i := 0; i < count; i++ { |
||||
parent := prevlevel[i/2] |
||||
nodes[i] = newNode(i, parent) |
||||
} |
||||
prevlevel = nodes |
||||
count *= 2 |
||||
} |
||||
// the datanode level is the nodes on the last level
|
||||
return &tree{ |
||||
leaves: prevlevel, |
||||
result: make(chan []byte, 1), |
||||
segment: make([]byte, segmentSize), |
||||
section: make([]byte, 2*segmentSize), |
||||
} |
||||
} |
||||
|
||||
// methods needed by hash.Hash
|
||||
|
||||
// Size returns the digest size of the BMT hash
|
||||
func (h *Hasher) Size() int { |
||||
return h.pool.SegmentSize |
||||
} |
||||
|
||||
// BlockSize returns the block size
|
||||
func (h *Hasher) BlockSize() int { |
||||
return h.pool.SegmentSize |
||||
} |
||||
|
||||
// Hash hashes the data and the span using the bmt hasher
|
||||
func Hash(h *Hasher, span, data []byte) []byte { |
||||
h.ResetWithLength(span) |
||||
h.Write(data) |
||||
return h.Sum(nil) |
||||
} |
||||
|
||||
// DataLength returns the maximum data size that is hashed by the hasher =
|
||||
// segment count times segment size
|
||||
func (h *Hasher) DataLength() int { |
||||
return h.pool.Datalength |
||||
} |
||||
|
||||
// Sum returns the hash of the buffer
|
||||
// hash.Hash interface Sum method appends the byte slice to the underlying
|
||||
// data before it calculates and returns the hash of the chunk
|
||||
// caller must make sure Sum is not called concurrently with Write, writeSection
|
||||
// and WriteSegment (TODO:)
|
||||
func (h *Hasher) Sum(b []byte) (r []byte) { |
||||
return h.sum(b, true, true) |
||||
} |
||||
|
||||
// sum implements Sum taking parameters
|
||||
// * if the tree is released right away
|
||||
// * if sequential write is used (can read sections)
|
||||
func (h *Hasher) sum(b []byte, release, section bool) (r []byte) { |
||||
t := h.bmt |
||||
h.finalise(section) |
||||
if t.offset > 0 { // get the last node (double segment)
|
||||
|
||||
// padding the segment with zero
|
||||
copy(t.segment[t.offset:], h.pool.zerohashes[0]) |
||||
} |
||||
if section { |
||||
if t.cur%2 == 1 { |
||||
// if just finished current segment, copy it to the right half of the chunk
|
||||
copy(t.section[h.pool.SegmentSize:], t.segment) |
||||
} else { |
||||
// copy segment to front of section, zero pad the right half
|
||||
copy(t.section, t.segment) |
||||
copy(t.section[h.pool.SegmentSize:], h.pool.zerohashes[0]) |
||||
} |
||||
h.writeSection(t.cur, t.section) |
||||
} else { |
||||
// TODO: h.writeSegment(t.cur, t.segment)
|
||||
panic("SegmentWriter not implemented") |
||||
} |
||||
bmtHash := <-t.result |
||||
span := t.span |
||||
|
||||
if release { |
||||
h.releaseTree() |
||||
} |
||||
// sha3(span + BMT(pure_chunk))
|
||||
if span == nil { |
||||
return bmtHash |
||||
} |
||||
bh := h.pool.hasher() |
||||
bh.Reset() |
||||
bh.Write(span) |
||||
bh.Write(bmtHash) |
||||
return bh.Sum(b) |
||||
} |
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// Hasher implements the io.Writer interface
|
||||
|
||||
// Write fills the buffer to hash,
|
||||
// with every full segment calls writeSection
|
||||
func (h *Hasher) Write(b []byte) (int, error) { |
||||
l := len(b) |
||||
if l <= 0 { |
||||
return 0, nil |
||||
} |
||||
t := h.bmt |
||||
need := (h.pool.SegmentCount - t.cur) * h.pool.SegmentSize |
||||
if l < need { |
||||
need = l |
||||
} |
||||
// calculate the missing bytes needed to complete the current open segment
|
||||
rest := h.pool.SegmentSize - t.offset |
||||
if need < rest { |
||||
rest = need |
||||
} |
||||
copy(t.segment[t.offset:], b[:rest]) |
||||
need -= rest |
||||
size := (t.offset + rest) % h.pool.SegmentSize |
||||
// read full segments and the last possibly partial segment
|
||||
for need > 0 { |
||||
// push all finished chunks we read
|
||||
if t.cur%2 == 0 { |
||||
copy(t.section, t.segment) |
||||
} else { |
||||
copy(t.section[h.pool.SegmentSize:], t.segment) |
||||
h.writeSection(t.cur, t.section) |
||||
} |
||||
size = h.pool.SegmentSize |
||||
if need < size { |
||||
size = need |
||||
} |
||||
copy(t.segment, b[rest:rest+size]) |
||||
need -= size |
||||
rest += size |
||||
t.cur++ |
||||
} |
||||
t.offset = size % h.pool.SegmentSize |
||||
return l, nil |
||||
} |
||||
|
||||
// Reset needs to be called before writing to the hasher
|
||||
func (h *Hasher) Reset() { |
||||
h.getTree() |
||||
} |
||||
|
||||
// Hasher implements the SwarmHash interface
|
||||
|
||||
// ResetWithLength needs to be called before writing to the hasher
|
||||
// the argument is supposed to be the byte slice binary representation of
|
||||
// the length of the data subsumed under the hash, i.e., span
|
||||
func (h *Hasher) ResetWithLength(span []byte) { |
||||
h.Reset() |
||||
h.bmt.span = span |
||||
} |
||||
|
||||
// releaseTree gives back the Tree to the pool whereby it unlocks
|
||||
// it resets tree, segment and index
|
||||
func (h *Hasher) releaseTree() { |
||||
t := h.bmt |
||||
if t != nil { |
||||
t.cur = 0 |
||||
t.offset = 0 |
||||
t.span = nil |
||||
t.hash = nil |
||||
h.bmt = nil |
||||
h.pool.release(t) |
||||
} |
||||
} |
||||
|
||||
// TODO: writeSegment writes the ith segment into the BMT tree
|
||||
// func (h *Hasher) writeSegment(i int, s []byte) {
|
||||
// go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s)
|
||||
// }
|
||||
|
||||
// writeSection writes the hash of the i/2-th section into the corresponding level 1 node of the BMT tree
|
||||
func (h *Hasher) writeSection(i int, section []byte) { |
||||
n := h.bmt.leaves[i/2] |
||||
isLeft := n.isLeft |
||||
n = n.parent |
||||
bh := h.pool.hasher() |
||||
bh.Write(section) |
||||
go func() { |
||||
sum := bh.Sum(nil) |
||||
if n == nil { |
||||
h.bmt.result <- sum |
||||
return |
||||
} |
||||
h.run(n, bh, isLeft, sum) |
||||
}() |
||||
} |
||||
|
||||
// run pushes the data to the node
|
||||
// if it is the first of 2 sisters written the routine returns
|
||||
// if it is the second, it calculates the hash and writes it
|
||||
// to the parent node recursively
|
||||
func (h *Hasher) run(n *node, bh hash.Hash, isLeft bool, s []byte) { |
||||
for { |
||||
if isLeft { |
||||
n.left = s |
||||
} else { |
||||
n.right = s |
||||
} |
||||
// the child-thread first arriving will quit
|
||||
if n.toggle() { |
||||
return |
||||
} |
||||
// the second thread now can be sure both left and right children are written
|
||||
// it calculates the hash of left|right and takes it to the next level
|
||||
bh.Reset() |
||||
bh.Write(n.left) |
||||
bh.Write(n.right) |
||||
s = bh.Sum(nil) |
||||
|
||||
// at the root of the bmt just write the result to the result channel
|
||||
if n.parent == nil { |
||||
h.bmt.result <- s |
||||
return |
||||
} |
||||
|
||||
// otherwise iterate on parent
|
||||
isLeft = n.isLeft |
||||
n = n.parent |
||||
} |
||||
} |
||||
|
||||
// finalise is following the path starting from the final datasegment to the
|
||||
// BMT root via parents
|
||||
// for unbalanced trees it fills in the missing right sister nodes using
|
||||
// the pool's lookup table for BMT subtree root hashes for all-zero sections
|
||||
func (h *Hasher) finalise(skip bool) { |
||||
t := h.bmt |
||||
isLeft := t.cur%2 == 0 |
||||
n := t.leaves[t.cur/2] |
||||
for level := 0; n != nil; level++ { |
||||
// when the final segment's path is going via left child node
|
||||
// we include an all-zero subtree hash for the right sister at the given level and toggle the node.
|
||||
// when the path is going through right child node, nothing to do
|
||||
if isLeft && !skip { |
||||
n.right = h.pool.zerohashes[level] |
||||
n.toggle() |
||||
} |
||||
skip = false |
||||
isLeft = n.isLeft |
||||
n = n.parent |
||||
} |
||||
} |
||||
|
||||
// getTree obtains a BMT resource by reserving one from the pool
|
||||
func (h *Hasher) getTree() *tree { |
||||
if h.bmt != nil { |
||||
return h.bmt |
||||
} |
||||
t := h.pool.reserve() |
||||
h.bmt = t |
||||
return t |
||||
} |
||||
|
||||
// atomic bool toggle implementing a concurrent reusable 2-state object
|
||||
// an atomic AddInt32 modulo 2 implements the atomic bool toggle
|
||||
// it returns true if the toggler just put it in the active/waiting state
|
||||
func (n *node) toggle() bool { |
||||
return atomic.AddInt32(&n.state, 1)%2 == 1 |
||||
} |
||||
|
||||
func hashstr(b []byte) string { |
||||
end := len(b) |
||||
if end > 4 { |
||||
end = 4 |
||||
} |
||||
return fmt.Sprintf("%x", b[:end]) |
||||
} |
||||
|
||||
// calculateDepthFor calculates the depth (number of levels) in the BMT tree
|
||||
func calculateDepthFor(n int) (d int) { |
||||
c := 2 |
||||
for ; c < n; c *= 2 { |
||||
d++ |
||||
} |
||||
return d + 1 |
||||
} |
@ -0,0 +1,85 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt is a simple nonconcurrent reference implementation for hashsize segment based
|
||||
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
|
||||
//
|
||||
// This implementation does not take advantage of any parallelism and uses
|
||||
// far more memory than necessary, but it is easy to see that it is correct.
|
||||
// It can be used for generating test cases for optimized implementations.
|
||||
// There are extra checks on reference hasher correctness in bmt_test.go
|
||||
// * TestRefHasher
|
||||
// * testBMTHasherCorrectness function
|
||||
package bmt |
||||
|
||||
import ( |
||||
"hash" |
||||
) |
||||
|
||||
// RefHasher is the non-optimized easy-to-read reference implementation of BMT
|
||||
type RefHasher struct { |
||||
maxDataLength int // c * hashSize, where c = 2 ^ ceil(log2(count)), where count = ceil(length / hashSize)
|
||||
sectionLength int // 2 * hashSize
|
||||
hasher hash.Hash // base hash func (Keccak256 SHA3)
|
||||
} |
||||
|
||||
// NewRefHasher returns a new RefHasher
|
||||
func NewRefHasher(hasher BaseHasherFunc, count int) *RefHasher { |
||||
h := hasher() |
||||
hashsize := h.Size() |
||||
c := 2 |
||||
for ; c < count; c *= 2 { |
||||
} |
||||
return &RefHasher{ |
||||
sectionLength: 2 * hashsize, |
||||
maxDataLength: c * hashsize, |
||||
hasher: h, |
||||
} |
||||
} |
||||
|
||||
// Hash returns the BMT hash of the byte slice
|
||||
// implements the SwarmHash interface
|
||||
func (rh *RefHasher) Hash(data []byte) []byte { |
||||
// if data is shorter than the base length (maxDataLength), we provide padding with zeros
|
||||
d := make([]byte, rh.maxDataLength) |
||||
length := len(data) |
||||
if length > rh.maxDataLength { |
||||
length = rh.maxDataLength |
||||
} |
||||
copy(d, data[:length]) |
||||
return rh.hash(d, rh.maxDataLength) |
||||
} |
||||
|
||||
// data has length maxDataLength = segmentSize * 2^k
|
||||
// hash calls itself recursively on both halves of the given slice
|
||||
// concatenates the results, and returns the hash of that
|
||||
// if the length of data is 2 * segmentSize it just returns the hash of that section
|
||||
func (rh *RefHasher) hash(data []byte, length int) []byte { |
||||
var section []byte |
||||
if length == rh.sectionLength { |
||||
// section contains two data segments (d)
|
||||
section = data |
||||
} else { |
||||
// section contains the hashes of the left and right BMT subtrees
|
||||
// to be calculated by calling hash recursively on left and right half of d
|
||||
length /= 2 |
||||
section = append(rh.hash(data[:length], length), rh.hash(data[length:], length)...) |
||||
} |
||||
rh.hasher.Reset() |
||||
rh.hasher.Write(section) |
||||
s := rh.hasher.Sum(nil) |
||||
return s |
||||
} |
@ -0,0 +1,390 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bmt |
||||
|
||||
import ( |
||||
"bytes" |
||||
crand "crypto/rand" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"io" |
||||
"math/rand" |
||||
"sync" |
||||
"sync/atomic" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3" |
||||
) |
||||
|
||||
// the actual data length generated (could be longer than max datalength of the BMT)
|
||||
const BufferSize = 4128 |
||||
|
||||
func sha3hash(data ...[]byte) []byte { |
||||
h := sha3.NewKeccak256() |
||||
for _, v := range data { |
||||
h.Write(v) |
||||
} |
||||
return h.Sum(nil) |
||||
} |
||||
|
||||
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
|
||||
// all data lengths between 0 and 256 bytes
|
||||
func TestRefHasher(t *testing.T) { |
||||
|
||||
// the test struct is used to specify the expected BMT hash for
|
||||
// segment counts between from and to and lengths from 1 to datalength
|
||||
type test struct { |
||||
from int |
||||
to int |
||||
expected func([]byte) []byte |
||||
} |
||||
|
||||
var tests []*test |
||||
// all lengths in [0,64] should be:
|
||||
//
|
||||
// sha3hash(data)
|
||||
//
|
||||
tests = append(tests, &test{ |
||||
from: 1, |
||||
to: 2, |
||||
expected: func(d []byte) []byte { |
||||
data := make([]byte, 64) |
||||
copy(data, d) |
||||
return sha3hash(data) |
||||
}, |
||||
}) |
||||
|
||||
// all segmentCounts in [3,4] should be:
|
||||
//
|
||||
// sha3hash(
|
||||
// sha3hash(data[:64])
|
||||
// sha3hash(data[64:])
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{ |
||||
from: 3, |
||||
to: 4, |
||||
expected: func(d []byte) []byte { |
||||
data := make([]byte, 128) |
||||
copy(data, d) |
||||
return sha3hash(sha3hash(data[:64]), sha3hash(data[64:])) |
||||
}, |
||||
}) |
||||
|
||||
// all segmentCounts in [5,8] should be:
|
||||
//
|
||||
// sha3hash(
|
||||
// sha3hash(
|
||||
// sha3hash(data[:64])
|
||||
// sha3hash(data[64:128])
|
||||
// )
|
||||
// sha3hash(
|
||||
// sha3hash(data[128:192])
|
||||
// sha3hash(data[192:])
|
||||
// )
|
||||
// )
|
||||
//
|
||||
tests = append(tests, &test{ |
||||
from: 5, |
||||
to: 8, |
||||
expected: func(d []byte) []byte { |
||||
data := make([]byte, 256) |
||||
copy(data, d) |
||||
return sha3hash(sha3hash(sha3hash(data[:64]), sha3hash(data[64:128])), sha3hash(sha3hash(data[128:192]), sha3hash(data[192:]))) |
||||
}, |
||||
}) |
||||
|
||||
// run the tests
|
||||
for _, x := range tests { |
||||
for segmentCount := x.from; segmentCount <= x.to; segmentCount++ { |
||||
for length := 1; length <= segmentCount*32; length++ { |
||||
t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) { |
||||
data := make([]byte, length) |
||||
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF { |
||||
t.Fatal(err) |
||||
} |
||||
expected := x.expected(data) |
||||
actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data) |
||||
if !bytes.Equal(actual, expected) { |
||||
t.Fatalf("expected %x, got %x", expected, actual) |
||||
} |
||||
}) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestHasherCorrectness(t *testing.T) { |
||||
err := testHasher(testBaseHasher) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
func testHasher(f func(BaseHasherFunc, []byte, int, int) error) error { |
||||
data := newData(BufferSize) |
||||
hasher := sha3.NewKeccak256 |
||||
size := hasher().Size() |
||||
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128} |
||||
|
||||
var err error |
||||
for _, count := range counts { |
||||
max := count * size |
||||
incr := 1 |
||||
for n := 1; n <= max; n += incr { |
||||
err = f(hasher, data, n, count) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize
|
||||
func TestHasherReuse(t *testing.T) { |
||||
t.Run(fmt.Sprintf("poolsize_%d", 1), func(t *testing.T) { |
||||
testHasherReuse(1, t) |
||||
}) |
||||
t.Run(fmt.Sprintf("poolsize_%d", PoolSize), func(t *testing.T) { |
||||
testHasherReuse(PoolSize, t) |
||||
}) |
||||
} |
||||
|
||||
func testHasherReuse(poolsize int, t *testing.T) { |
||||
hasher := sha3.NewKeccak256 |
||||
pool := NewTreePool(hasher, SegmentCount, poolsize) |
||||
defer pool.Drain(0) |
||||
bmt := New(pool) |
||||
|
||||
for i := 0; i < 100; i++ { |
||||
data := newData(BufferSize) |
||||
n := rand.Intn(bmt.DataLength()) |
||||
err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Tests if pool can be cleanly reused even in concurrent use
|
||||
func TestBMTHasherConcurrentUse(t *testing.T) { |
||||
hasher := sha3.NewKeccak256 |
||||
pool := NewTreePool(hasher, SegmentCount, PoolSize) |
||||
defer pool.Drain(0) |
||||
cycles := 100 |
||||
errc := make(chan error) |
||||
|
||||
for i := 0; i < cycles; i++ { |
||||
go func() { |
||||
bmt := New(pool) |
||||
data := newData(BufferSize) |
||||
n := rand.Intn(bmt.DataLength()) |
||||
errc <- testHasherCorrectness(bmt, hasher, data, n, 128) |
||||
}() |
||||
} |
||||
LOOP: |
||||
for { |
||||
select { |
||||
case <-time.NewTimer(5 * time.Second).C: |
||||
t.Fatal("timed out") |
||||
case err := <-errc: |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
cycles-- |
||||
if cycles == 0 { |
||||
break LOOP |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
// helper function that creates a tree pool and verifies hasher correctness with it
|
||||
func testBaseHasher(hasher BaseHasherFunc, d []byte, n, count int) error { |
||||
pool := NewTreePool(hasher, count, 1) |
||||
defer pool.Drain(0) |
||||
bmt := New(pool) |
||||
return testHasherCorrectness(bmt, hasher, d, n, count) |
||||
} |
||||
|
||||
// helper function that compares reference and optimised implementations on
|
||||
// correctness
|
||||
func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, count int) (err error) { |
||||
span := make([]byte, 8) |
||||
if len(d) < n { |
||||
n = len(d) |
||||
} |
||||
binary.BigEndian.PutUint64(span, uint64(n)) |
||||
data := d[:n] |
||||
rbmt := NewRefHasher(hasher, count) |
||||
exp := sha3hash(span, rbmt.Hash(data)) |
||||
got := Hash(bmt, span, data) |
||||
if !bytes.Equal(got, exp) { |
||||
return fmt.Errorf("wrong hash: expected %x, got %x", exp, got) |
||||
} |
||||
return err |
||||
} |
||||
|
||||
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) } |
||||
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) } |
||||
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) } |
||||
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) } |
||||
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) } |
||||
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) } |
||||
|
||||
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) } |
||||
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) } |
||||
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) } |
||||
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) } |
||||
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) } |
||||
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) } |
||||
|
||||
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) } |
||||
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) } |
||||
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) } |
||||
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) } |
||||
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) } |
||||
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) } |
||||
|
||||
func BenchmarkBMTHasher_4k(t *testing.B) { benchmarkBMTHasher(4096, t) } |
||||
func BenchmarkBMTHasher_2k(t *testing.B) { benchmarkBMTHasher(4096/2, t) } |
||||
func BenchmarkBMTHasher_1k(t *testing.B) { benchmarkBMTHasher(4096/4, t) } |
||||
func BenchmarkBMTHasher_512b(t *testing.B) { benchmarkBMTHasher(4096/8, t) } |
||||
func BenchmarkBMTHasher_256b(t *testing.B) { benchmarkBMTHasher(4096/16, t) } |
||||
func BenchmarkBMTHasher_128b(t *testing.B) { benchmarkBMTHasher(4096/32, t) } |
||||
|
||||
func BenchmarkBMTHasherNoPool_4k(t *testing.B) { benchmarkBMTHasherPool(1, 4096, t) } |
||||
func BenchmarkBMTHasherNoPool_2k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/2, t) } |
||||
func BenchmarkBMTHasherNoPool_1k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/4, t) } |
||||
func BenchmarkBMTHasherNoPool_512b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/8, t) } |
||||
func BenchmarkBMTHasherNoPool_256b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/16, t) } |
||||
func BenchmarkBMTHasherNoPool_128b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/32, t) } |
||||
|
||||
func BenchmarkBMTHasherPool_4k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096, t) } |
||||
func BenchmarkBMTHasherPool_2k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/2, t) } |
||||
func BenchmarkBMTHasherPool_1k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/4, t) } |
||||
func BenchmarkBMTHasherPool_512b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/8, t) } |
||||
func BenchmarkBMTHasherPool_256b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/16, t) } |
||||
func BenchmarkBMTHasherPool_128b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/32, t) } |
||||
|
||||
// benchmarks simple sha3 hash on chunks
|
||||
func benchmarkSHA3(n int, t *testing.B) { |
||||
data := newData(n) |
||||
hasher := sha3.NewKeccak256 |
||||
h := hasher() |
||||
|
||||
t.ReportAllocs() |
||||
t.ResetTimer() |
||||
for i := 0; i < t.N; i++ { |
||||
h.Reset() |
||||
h.Write(data) |
||||
h.Sum(nil) |
||||
} |
||||
} |
||||
|
||||
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
|
||||
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
|
||||
// doing it on n PoolSize each reusing the base hasher
|
||||
// the premise is that this is the minimum computation needed for a BMT
|
||||
// therefore this serves as a theoretical optimum for concurrent implementations
|
||||
func benchmarkBMTBaseline(n int, t *testing.B) { |
||||
hasher := sha3.NewKeccak256 |
||||
hashSize := hasher().Size() |
||||
data := newData(hashSize) |
||||
|
||||
t.ReportAllocs() |
||||
t.ResetTimer() |
||||
for i := 0; i < t.N; i++ { |
||||
count := int32((n-1)/hashSize + 1) |
||||
wg := sync.WaitGroup{} |
||||
wg.Add(PoolSize) |
||||
var i int32 |
||||
for j := 0; j < PoolSize; j++ { |
||||
go func() { |
||||
defer wg.Done() |
||||
h := hasher() |
||||
for atomic.AddInt32(&i, 1) < count { |
||||
h.Reset() |
||||
h.Write(data) |
||||
h.Sum(nil) |
||||
} |
||||
}() |
||||
} |
||||
wg.Wait() |
||||
} |
||||
} |
||||
|
||||
// benchmarks BMT Hasher
|
||||
func benchmarkBMTHasher(n int, t *testing.B) { |
||||
data := newData(n) |
||||
hasher := sha3.NewKeccak256 |
||||
pool := NewTreePool(hasher, SegmentCount, PoolSize) |
||||
|
||||
t.ReportAllocs() |
||||
t.ResetTimer() |
||||
for i := 0; i < t.N; i++ { |
||||
bmt := New(pool) |
||||
Hash(bmt, nil, data) |
||||
} |
||||
} |
||||
|
||||
// benchmarks 100 concurrent bmt hashes with pool capacity
|
||||
func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) { |
||||
data := newData(n) |
||||
hasher := sha3.NewKeccak256 |
||||
pool := NewTreePool(hasher, SegmentCount, poolsize) |
||||
cycles := 100 |
||||
|
||||
t.ReportAllocs() |
||||
t.ResetTimer() |
||||
wg := sync.WaitGroup{} |
||||
for i := 0; i < t.N; i++ { |
||||
wg.Add(cycles) |
||||
for j := 0; j < cycles; j++ { |
||||
go func() { |
||||
defer wg.Done() |
||||
bmt := New(pool) |
||||
Hash(bmt, nil, data) |
||||
}() |
||||
} |
||||
wg.Wait() |
||||
} |
||||
} |
||||
|
||||
// benchmarks the reference hasher
|
||||
func benchmarkRefHasher(n int, t *testing.B) { |
||||
data := newData(n) |
||||
hasher := sha3.NewKeccak256 |
||||
rbmt := NewRefHasher(hasher, 128) |
||||
|
||||
t.ReportAllocs() |
||||
t.ResetTimer() |
||||
for i := 0; i < t.N; i++ { |
||||
rbmt.Hash(data) |
||||
} |
||||
} |
||||
|
||||
func newData(bufferSize int) []byte { |
||||
data := make([]byte, bufferSize) |
||||
_, err := io.ReadFull(crand.Reader, data) |
||||
if err != nil { |
||||
panic(err.Error()) |
||||
} |
||||
return data |
||||
} |
@ -0,0 +1,48 @@ |
||||
package log |
||||
|
||||
import ( |
||||
l "github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
) |
||||
|
||||
const ( |
||||
// CallDepth is set to 1 in order to influence the reported line number of
|
||||
// the log message with 1 skipped stack frame of calling l.Output()
|
||||
CallDepth = 1 |
||||
) |
||||
|
||||
// Warn is a convenient alias for log.Warn with stats
|
||||
func Warn(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("warn", nil).Inc(1) |
||||
l.Output(msg, l.LvlWarn, CallDepth, ctx...) |
||||
} |
||||
|
||||
// Error is a convenient alias for log.Error with stats
|
||||
func Error(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("error", nil).Inc(1) |
||||
l.Output(msg, l.LvlError, CallDepth, ctx...) |
||||
} |
||||
|
||||
// Crit is a convenient alias for log.Crit with stats
|
||||
func Crit(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("crit", nil).Inc(1) |
||||
l.Output(msg, l.LvlCrit, CallDepth, ctx...) |
||||
} |
||||
|
||||
// Info is a convenient alias for log.Info with stats
|
||||
func Info(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("info", nil).Inc(1) |
||||
l.Output(msg, l.LvlInfo, CallDepth, ctx...) |
||||
} |
||||
|
||||
// Debug is a convenient alias for log.Debug with stats
|
||||
func Debug(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("debug", nil).Inc(1) |
||||
l.Output(msg, l.LvlDebug, CallDepth, ctx...) |
||||
} |
||||
|
||||
// Trace is a convenient alias for log.Trace with stats
|
||||
func Trace(msg string, ctx ...interface{}) { |
||||
metrics.GetOrRegisterCounter("trace", nil).Inc(1) |
||||
l.Output(msg, l.LvlTrace, CallDepth, ctx...) |
||||
} |
@ -0,0 +1,92 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package multihash |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
) |
||||
|
||||
const ( |
||||
defaultMultihashLength = 32 |
||||
defaultMultihashTypeCode = 0x1b |
||||
) |
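
// Wire layout assumed by the helpers in this file (a sketch derived from the
// code below, not an authoritative spec):
//
//   <uvarint type code = 0x1b> <uvarint digest length> <digest bytes>
//
// With the default 32-byte digest both varints fit in a single byte each, so
// the header is 2 bytes and a complete multihash is 34 bytes long.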
||||
|
||||
var ( |
||||
multihashTypeCode uint8 |
||||
MultihashLength = defaultMultihashLength |
||||
) |
||||
|
||||
func init() { |
||||
multihashTypeCode = defaultMultihashTypeCode |
||||
MultihashLength = defaultMultihashLength |
||||
} |
||||
|
||||
// check if valid swarm multihash
|
||||
func isSwarmMultihashType(code uint8) bool { |
||||
return code == multihashTypeCode |
||||
} |
||||
|
||||
// GetMultihashLength returns the digest length of the provided multihash
|
||||
// It will fail if the multihash is not a valid swarm multihash
|
||||
func GetMultihashLength(data []byte) (int, int, error) { |
||||
cursor := 0 |
||||
typ, c := binary.Uvarint(data) |
||||
if c <= 0 { |
||||
return 0, 0, errors.New("unreadable hashtype field") |
||||
} |
||||
if !isSwarmMultihashType(uint8(typ)) { |
||||
return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ) |
||||
} |
||||
cursor += c |
||||
hashlength, c := binary.Uvarint(data[cursor:]) |
||||
if c <= 0 { |
||||
return 0, 0, errors.New("unreadable length field") |
||||
} |
||||
cursor += c |
||||
|
||||
// we cheekily assume hashlength < maxint
|
||||
inthashlength := int(hashlength) |
||||
if len(data[cursor:]) < inthashlength {
||||
return 0, 0, errors.New("length mismatch") |
||||
} |
||||
return inthashlength, cursor, nil |
||||
} |
||||
|
||||
// FromMultihash returns the digest portion of the multihash
|
||||
// It will fail if the multihash is not a valid swarm multihash
|
||||
func FromMultihash(data []byte) ([]byte, error) { |
||||
hashLength, _, err := GetMultihashLength(data) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return data[len(data)-hashLength:], nil |
||||
} |
||||
|
||||
// ToMultihash wraps the provided digest data with a swarm multihash header
|
||||
func ToMultihash(hashData []byte) []byte { |
||||
buf := bytes.NewBuffer(nil) |
||||
b := make([]byte, 8) |
||||
c := binary.PutUvarint(b, uint64(multihashTypeCode)) |
||||
buf.Write(b[:c]) |
||||
c = binary.PutUvarint(b, uint64(len(hashData))) |
||||
buf.Write(b[:c]) |
||||
buf.Write(hashData) |
||||
return buf.Bytes() |
||||
} |
@ -0,0 +1,53 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package multihash |
||||
|
||||
import ( |
||||
"bytes" |
||||
"math/rand" |
||||
"testing" |
||||
) |
||||
|
||||
// parse multihash, and check that invalid multihashes fail
|
||||
func TestCheckMultihash(t *testing.T) { |
||||
hashbytes := make([]byte, 32) |
||||
c, err := rand.Read(hashbytes) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} else if c < 32 { |
||||
t.Fatal("short read") |
||||
} |
||||
|
||||
expected := ToMultihash(hashbytes) |
||||
|
||||
l, hl, _ := GetMultihashLength(expected) |
||||
if l != 32 { |
||||
t.Fatalf("expected length %d, got %d", 32, l) |
||||
} else if hl != 2 { |
||||
t.Fatalf("expected header length %d, got %d", 2, hl) |
||||
} |
||||
if _, _, err := GetMultihashLength(expected[1:]); err == nil { |
||||
t.Fatal("expected failure on corrupt header") |
||||
} |
||||
if _, _, err := GetMultihashLength(expected[:len(expected)-2]); err == nil { |
||||
t.Fatal("expected failure on short content") |
||||
} |
||||
dh, _ := FromMultihash(expected) |
||||
if !bytes.Equal(dh, hashbytes) { |
||||
t.Fatalf("expected content hash %x, got %x", hashbytes, dh) |
||||
} |
||||
} |
@ -0,0 +1,152 @@ |
||||
## Streaming |
||||
|
||||
Streaming is a new protocol of the swarm bzz bundle of protocols. |
||||
This protocol provides the basic logic for chunk-based data flow. |
||||
It implements simple retrieve requests and delivery using a priority queue.
A data exchange stream is a directional flow of chunks between peers.
The source of data chunks is the upstream peer, the receiver is called the
downstream peer. Each streaming protocol defines an outgoing streamer
and an incoming streamer, the former installed on the upstream,
the latter on the downstream peer.
||||
|
||||
Subscribe on StreamerPeer launches an incoming streamer that sends
a subscribe msg upstream. The streamer on the upstream peer
handles the subscribe msg by installing the relevant outgoing streamer.
The modules now engage in a process where the upstream peer sends a sequence of hashes of
chunks downstream (OfferedHashesMsg). The downstream peer evaluates which hashes are needed
and gets them delivered by sending back a msg (WantedHashesMsg).
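
To make this round trip concrete, here is a minimal sketch of the message shapes implied by the description above; the names and fields are assumptions for illustration, and the actual definitions in `swarm/network/stream` may differ.

```go
// OfferedHashesMsg is sent by the upstream peer: a batch of chunk hashes
// available on the named stream (illustrative shape only).
type OfferedHashesMsg struct {
	Stream string // name of the stream the hashes belong to
	From   uint64 // index of the first offered chunk in the batch
	To     uint64 // index of the last offered chunk in the batch
	Hashes []byte // concatenated chunk hashes offered to the downstream peer
}

// WantedHashesMsg is the downstream peer's reply: a bitvector selecting which
// of the offered hashes it still needs (illustrative shape only).
type WantedHashesMsg struct {
	Stream string // name of the stream
	Want   []byte // bitvector; bit i set means the i-th offered hash is wanted
	From   uint64 // start of the next interval the downstream peer asks for
	To     uint64 // end of the next interval
}
```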
||||
|
||||
Historical syncing is supported (currently not the right abstraction):
state is kept across sessions by saving a series of intervals after their last
batch actually arrived.
||||
|
||||
Live streaming is also supported, by starting the session from the first item
||||
after the subscription. |
||||
|
||||
Provable data exchange. In case a stream represents a swarm document's data layer |
||||
or higher level chunks, streaming up to a certain index is always provable. It saves on |
||||
sending intermediate chunks. |
||||
|
||||
Using the streamer logic, various stream types are easy to implement: |
||||
|
||||
* light node requests: |
||||
* url lookup with offset |
||||
* document download |
||||
* document upload |
||||
* syncing |
||||
* live session syncing |
||||
* historical syncing |
||||
* simple retrieve requests and deliveries |
||||
* mutable resource updates streams |
||||
* receipting for finger pointing |
||||
|
||||
## Syncing |
||||
|
||||
Syncing is the process that makes sure storer nodes end up storing all and only the chunks that are requested from them. |
||||
|
||||
### Requirements |
||||
|
||||
- eventual consistency: every chunk, including historical ones, should be syncable
- since the same chunk can and will arrive from many peers, network traffic should be
  optimised so that there is only one transfer of data per chunk
- explicit request deliveries should be prioritised above recent chunks received
  during the ongoing session, which in turn should be prioritised above historical chunks
- insured chunks should be receipted for finger-pointing litigation; the receipt storage
  should be organised efficiently, and the upstream peer should also be able to find these
  receipts for a deleted chunk easily, to refute a challenge
- syncing should be resilient to cut connections: metadata that keeps track of syncing
  state across sessions should be persisted, and historical syncing state should survive a restart
- extra data structures to support syncing should be kept to a minimum
- syncing is organised separately for each chunk type (resource update vs. content chunk)
- various types of streams should have common logic abstracted
||||
|
||||
Syncing is now entirely mediated by the localstore, i.e., there are no processes or memory leaks due to network contention.
When a new chunk is stored, its chunk hash is indexed by proximity bin.
||||
|
||||
Peers synchronise by getting the chunks that are closer to the downstream peer than to the upstream one.
Consequently, peers just sync all stored items for the kademlia bin the receiving peer falls into.
The special case of nearest-neighbour sets is handled by the downstream peer
indicating that it wants to sync all kademlia bins with proximity order equal to or higher
than its depth, as sketched below.
||||
|
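The bin-selection rule can be summarised in a few lines. This is a minimal
sketch under stated assumptions: the function name `subscriptionBins` and the
`maxPO` parameter are illustrative, not part of the production stream API.

```go
// subscriptionBins returns the proximity-order bins a downstream peer asks an
// upstream peer to sync: the single bin the connection falls into for ordinary
// peers, and every bin from the advertised depth up to maxPO for peers within
// the nearest-neighbour set.
func subscriptionBins(po, depth, maxPO int) []int {
	if po < depth {
		return []int{po}
	}
	var bins []int
	for b := depth; b <= maxPO; b++ {
		bins = append(bins, b)
	}
	return bins
}
```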
||||
This sync state represents the initial state of a sync connection session. |
||||
Retrieval is dictated by downstream peers simply using a special streamer protocol. |
||||
|
||||
Syncing chunks created during the session by the upstream peer is called live session syncing |
||||
while syncing of earlier chunks is historical syncing. |
||||
|
||||
Once the relevant chunk (the batch of offered hashes) is retrieved, the downstream peer looks up all hash segments in its localstore
and sends the upstream peer a message with a bitvector indicating the missing chunks, i.e., the new items it wants delivered.
In turn, the upstream peer sends the relevant chunk data alongside their indexes.
||||
|
||||
On sending chunks there is a priority queue system. If, while looking up hashes in its localstore,
the downstream peer hits an open request, then a retrieve request is sent immediately to the upstream peer, indicating
that no extra round of checks is needed. If another peer's syncer hits the same open request, it is slightly unsafe not to ask
that peer too: if the first one disconnects before delivering, or fails to deliver and therefore gets
disconnected, we should still be able to continue with the other. The minimal redundant traffic coming from such simultaneous
eventualities should be sufficiently rare not to warrant more complex treatment.
||||
|
||||
Session syncing involves the downstream peer requesting a new state on a bin from upstream.
Using the new state, the range of chunks between the previous state and the new one is retrieved
and chunks are requested just as in the historical case. After receiving all the missing chunks
from the new hashes, the downstream peer requests a new range. If this happens before the upstream peer publishes a new state,
we say that session syncing is live, or that the two peers are in sync. In general, the time interval passed since the downstream peer's request up to the current session cursor is a good indication of a permanent (and probably increasing) lag.
||||
|
||||
If there is no historical backlog and the downstream peer has an acceptable 'last synced' tag, then it is said to be fully synced with the upstream peer.
If a peer is fully synced with all its storer peers, it can advertise itself as globally fully synced.
||||
|
||||
The downstream peer persists a record of the last synced offset. When the two peers disconnect and
reconnect, syncing can resume from there.
This, however, can also happen while historical syncing is not yet complete.
Effectively, this means that the peer needs to persist a record of an arbitrary array of covered offset ranges, as sketched below.
||||
|
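A minimal sketch of such a record: merging a newly synced range into a set of
disjoint covered intervals. The real bookkeeping lives in
swarm/network/stream/intervals; the `Interval`/`Intervals` names and the `Add`
method here are illustrative assumptions, not the production implementation.

```go
// Interval is a covered range of stream offsets; Intervals is the arbitrary
// array of such ranges a peer persists across sessions.
type Interval struct {
	Start, End uint64
}

type Intervals struct {
	Ranges []Interval // kept disjoint and non-adjacent by Add
}

// Add merges the newly synced range [start, end] into the persisted set,
// coalescing it with any overlapping or adjacent ranges.
func (iv *Intervals) Add(start, end uint64) {
	merged := Interval{Start: start, End: end}
	var kept []Interval
	for _, r := range iv.Ranges {
		if r.End+1 < merged.Start || merged.End+1 < r.Start {
			kept = append(kept, r) // disjoint and non-adjacent: keep as is
			continue
		}
		if r.Start < merged.Start {
			merged.Start = r.Start
		}
		if r.End > merged.End {
			merged.End = r.End
		}
	}
	iv.Ranges = append(kept, merged)
}
```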
||||
### Delivery requests |
||||
|
||||
Once the appropriate ranges of the hash stream are retrieved and buffered, the downstream peer simply scans the hashes, looks them up in the localstore and, if not found, creates a request entry.
The range is referenced by the chunk index. Alongside the name (indicating the stream, e.g., content chunks for bin 6) and the range,
the downstream peer sends a 128-bit-long bitvector indicating which chunks are needed.
Newly created requests are bound together in a waitgroup which, when done, prompts sending the next one.
To be able to do checking and storage concurrently, we keep a buffer of one, so we start with two batches of hashes.
If there is nothing to give, the upstream peer's SetNextBatch blocks. A subscription ends with an unsubscribe, which removes the syncer from the map.
||||
|
||||
Cancelling requests (for instance, the late chunks of an erasure batch) should be done via a channel closed
on the request, as in the sketch below.
||||
|
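A minimal sketch of this cancellation pattern, under the assumption that each
open request carries its own `cancel` channel (the `request` type and its
fields are illustrative, not the production structs):

```go
type request struct {
	key    []byte        // hash of the requested chunk
	cancel chan struct{} // closed to cancel the request
}

// cancelled reports, without blocking, whether the request has been cancelled.
func (r *request) cancelled() bool {
	select {
	case <-r.cancel:
		return true
	default:
		return false
	}
}
```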
||||
A simple request is also a subscribe.
Different streaming protocols are different p2p protocols with the same message types;
the constructor is the Run function itself, which takes a streamer peer as argument.
||||
|
||||
|
||||
### Provable streams
||||
|
||||
The swarm hash over the hash stream has many advantages. It implements a provable data transfer
and provides efficient storage for receipts in the form of inclusion proofs usable for finger-pointing litigation.
When challenged on a missing chunk, the upstream peer will provide an inclusion proof of the chunk hash against the state of the
sync stream. In order to be able to generate such an inclusion proof, the upstream peer needs to store the hash index (counting consecutive hash-size segments) alongside the chunk data, and preserve it even when the chunk data is deleted, until the chunk is no longer insured.
If there is no valid insurance on the files, the entry may be deleted.
As long as the chunk is preserved, no takeover proof will be needed, since the node can respond to any challenge.
However, once the node needs to delete an insured chunk for capacity reasons, a receipt should be available to
refute the challenge by finger pointing to a downstream peer.
As part of the deletion protocol, hashes of insured chunks to be removed are then pushed to an infinite stream for every bin.
||||
|
||||
The downstream peer, on the other hand, needs to make sure that it can only be finger-pointed about a chunk it actually received and stored.
For this, the check of a state should be exhaustive. If historical syncing finishes on one state, all hashes before it are covered, with no
surprises; in other words, historical syncing is self-verifying. With session syncing, however, it is not enough to check back covering the range from the old offset to the new one. Continuity (i.e., that the new state is an extension of the old one) needs to be verified: after the downstream peer reads the range into a buffer, it appends the buffer to the last known state at the last known offset and verifies that the resulting hash matches
the latest state. Past intervals of historical syncing are checked via the session root.
The upstream peer signs the states, which downstream peers can use as handover proofs.
Downstream peers sign off on a state together with an initial offset.
||||
|
||||
Once historical syncing is complete and the session does not lag, the downstream peer only preserves the latest upstream state and stores the signed version.
||||
|
||||
The upstream peer needs to keep the latest takeover states: each deleted chunk's hash should be covered by a takeover proof of at least one peer. If historical syncing is complete, the upstream peer will typically store only the latest takeover proof from the downstream peer.
Crucially, the structure is totally independent of the number of peers in the bin, so it scales extremely well.
||||
|
||||
## Implementation
||||
|
||||
The simplest protocol just involves the upstream peer prefixing the key with the kademlia proximity order (say 0-15 or 0-31)
and simply iterating on the index per bin when syncing with a peer.
||||
|
||||
Priority queues are used for sending chunks so that user-triggered requests are responded to first, session syncing second, and historical syncing with the lowest priority; see the sketch below.
The request for a chunk remains implemented as a dataless entry in the memory store.
The lifecycle of this object should be more carefully thought through, i.e., when retrieval fails it should be removed.
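A minimal sketch of the three-level priority dispatch described above. The
`Top`/`Mid`/`Low` constants, channel-based queues and the `dispatch` loop are
illustrative assumptions; the production implementation lives in
swarm/network/priorityqueue.

```go
const (
	Low = iota // historical syncing
	Mid        // live session syncing
	Top        // user-triggered retrieve request deliveries
)

// dispatch sends chunks from the highest-priority non-empty queue first,
// blocking only when all three queues are empty.
func dispatch(queues [3]chan []byte, send func([]byte), quit chan struct{}) {
	for {
		sent := false
		for p := Top; p >= Low; p-- { // try strictly highest priority first
			select {
			case chunk := <-queues[p]:
				send(chunk)
				sent = true
			default:
			}
			if sent {
				break
			}
		}
		if sent {
			continue
		}
		// nothing ready: block until any queue has an item or we quit
		select {
		case chunk := <-queues[Top]:
			send(chunk)
		case chunk := <-queues[Mid]:
			send(chunk)
		case chunk := <-queues[Low]:
			send(chunk)
		case <-quit:
			return
		}
	}
}
```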
@ -0,0 +1,66 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitvector |
||||
|
||||
import ( |
||||
"errors" |
||||
) |
||||
|
||||
var errInvalidLength = errors.New("invalid length") |
||||
|
||||
type BitVector struct { |
||||
len int |
||||
b []byte |
||||
} |
||||
|
||||
func New(l int) (bv *BitVector, err error) { |
||||
return NewFromBytes(make([]byte, l/8+1), l) |
||||
} |
||||
|
||||
func NewFromBytes(b []byte, l int) (bv *BitVector, err error) { |
||||
if l <= 0 { |
||||
return nil, errInvalidLength |
||||
} |
||||
if len(b)*8 < l { |
||||
return nil, errInvalidLength |
||||
} |
||||
return &BitVector{ |
||||
len: l, |
||||
b: b, |
||||
}, nil |
||||
} |
||||
|
||||
func (bv *BitVector) Get(i int) bool { |
||||
bi := i / 8 |
||||
return bv.b[bi]&(0x1<<uint(i%8)) != 0 |
||||
} |
||||
|
||||
func (bv *BitVector) Set(i int, v bool) { |
||||
bi := i / 8 |
||||
cv := bv.Get(i) |
||||
if cv != v { |
||||
bv.b[bi] ^= 0x1 << uint8(i%8) |
||||
} |
||||
} |
||||
|
||||
func (bv *BitVector) Bytes() []byte { |
||||
return bv.b |
||||
} |
||||
|
||||
func (bv *BitVector) Length() int { |
||||
return bv.len |
||||
} |
@ -0,0 +1,104 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitvector |
||||
|
||||
import "testing" |
||||
|
||||
func TestBitvectorNew(t *testing.T) { |
||||
_, err := New(0) |
||||
if err != errInvalidLength { |
||||
t.Errorf("expected err %v, got %v", errInvalidLength, err) |
||||
} |
||||
|
||||
_, err = NewFromBytes(nil, 0) |
||||
if err != errInvalidLength { |
||||
t.Errorf("expected err %v, got %v", errInvalidLength, err) |
||||
} |
||||
|
||||
_, err = NewFromBytes([]byte{0}, 9) |
||||
if err != errInvalidLength { |
||||
t.Errorf("expected err %v, got %v", errInvalidLength, err) |
||||
} |
||||
|
||||
_, err = NewFromBytes(make([]byte, 8), 8) |
||||
if err != nil { |
||||
t.Error(err) |
||||
} |
||||
} |
||||
|
||||
func TestBitvectorGetSet(t *testing.T) { |
||||
for _, length := range []int{ |
||||
1, |
||||
2, |
||||
4, |
||||
8, |
||||
9, |
||||
15, |
||||
16, |
||||
} { |
||||
bv, err := New(length) |
||||
if err != nil { |
||||
t.Errorf("error for length %v: %v", length, err) |
||||
} |
||||
|
||||
for i := 0; i < length; i++ { |
||||
if bv.Get(i) { |
||||
t.Errorf("expected false for element on index %v", i) |
||||
} |
||||
} |
||||
|
||||
func() { |
||||
defer func() { |
||||
if err := recover(); err == nil { |
||||
t.Errorf("expecting panic") |
||||
} |
||||
}() |
||||
bv.Get(length + 8) |
||||
}() |
||||
|
||||
for i := 0; i < length; i++ { |
||||
bv.Set(i, true) |
||||
for j := 0; j < length; j++ { |
||||
if j == i { |
||||
if !bv.Get(j) { |
||||
t.Errorf("element on index %v is not set to true", i) |
||||
} |
||||
} else { |
||||
if bv.Get(j) { |
||||
t.Errorf("element on index %v is not false", i) |
||||
} |
||||
} |
||||
} |
||||
|
||||
bv.Set(i, false) |
||||
|
||||
if bv.Get(i) { |
||||
t.Errorf("element on index %v is not set to false", i) |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestBitvectorNewFromBytesGet(t *testing.T) { |
||||
bv, err := NewFromBytes([]byte{8}, 8) |
||||
if err != nil { |
||||
t.Error(err) |
||||
} |
||||
if !bv.Get(3) { |
||||
t.Fatalf("element 3 is not set to true: state %08b", bv.b[0]) |
||||
} |
||||
} |
@ -0,0 +1,30 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
func LogAddrs(nns [][]byte) string { |
||||
var nnsa []string |
||||
for _, nn := range nns { |
||||
nnsa = append(nnsa, fmt.Sprintf("%08x", nn[:4])) |
||||
} |
||||
return strings.Join(nnsa, ", ") |
||||
} |
@ -1,232 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
//metrics variables
|
||||
var ( |
||||
syncReceiveCount = metrics.NewRegisteredCounter("network.sync.recv.count", nil) |
||||
syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil) |
||||
syncSendCount = metrics.NewRegisteredCounter("network.sync.send.count", nil) |
||||
syncSendRefused = metrics.NewRegisteredCounter("network.sync.send.refused", nil) |
||||
syncSendNotFound = metrics.NewRegisteredCounter("network.sync.send.notfound", nil) |
||||
) |
||||
|
||||
// Handler for storage/retrieval related protocol requests
|
||||
// implements the StorageHandler interface used by the bzz protocol
|
||||
type Depo struct { |
||||
hashfunc storage.SwarmHasher |
||||
localStore storage.ChunkStore |
||||
netStore storage.ChunkStore |
||||
} |
||||
|
||||
func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo { |
||||
return &Depo{ |
||||
hashfunc: hash, |
||||
localStore: localStore, |
||||
netStore: remoteStore, // entrypoint internal
|
||||
} |
||||
} |
||||
|
||||
// Handles UnsyncedKeysMsg after msg decoding - unsynced hashes upto sync state
|
||||
// * the remote sync state is just stored and handled in protocol
|
||||
// * filters through the new syncRequests and send the ones missing
|
||||
// * back immediately as a deliveryRequest message
|
||||
// * empty message just pings back for more (is this needed?)
|
||||
// * strict signed sync states may be needed.
|
||||
func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error { |
||||
unsynced := req.Unsynced |
||||
var missing []*syncRequest |
||||
var chunk *storage.Chunk |
||||
var err error |
||||
for _, req := range unsynced { |
||||
// skip keys that are found,
|
||||
chunk, err = self.localStore.Get(req.Key[:]) |
||||
if err != nil || chunk.SData == nil { |
||||
missing = append(missing, req) |
||||
} |
||||
} |
||||
log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State)) |
||||
log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced)) |
||||
// send delivery request with missing keys
|
||||
err = p.deliveryRequest(missing) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
// set peers state to persist
|
||||
p.syncState = req.State |
||||
return nil |
||||
} |
||||
|
||||
// Handles deliveryRequestMsg
|
||||
// * serves actual chunks asked by the remote peer
|
||||
// by pushing to the delivery queue (sync db) of the correct priority
|
||||
// (remote peer is free to reprioritize)
|
||||
// * the message implies remote peer wants more, so trigger for
|
||||
// * new outgoing unsynced keys message is fired
|
||||
func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error { |
||||
deliver := req.Deliver |
||||
// queue the actual delivery of a chunk ()
|
||||
log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver)) |
||||
for _, sreq := range deliver { |
||||
// TODO: look up in cache here or in deliveries
|
||||
// priorities are taken from the message so the remote party can
|
||||
// reprioritise to at their leisure
|
||||
// r = self.pullCached(sreq.Key) // pulls and deletes from cache
|
||||
Push(p, sreq.Key, sreq.Priority) |
||||
} |
||||
|
||||
// sends it out as unsyncedKeysMsg
|
||||
p.syncer.sendUnsyncedKeys() |
||||
return nil |
||||
} |
||||
|
||||
// the entrypoint for store requests coming from the bzz wire protocol
|
||||
// if key found locally, return. otherwise
|
||||
// remote is untrusted, so hash is verified and chunk passed on to NetStore
|
||||
func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) { |
||||
var islocal bool |
||||
req.from = p |
||||
chunk, err := self.localStore.Get(req.Key) |
||||
switch { |
||||
case err != nil: |
||||
log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key)) |
||||
// not found in memory cache, ie., a genuine store request
|
||||
// create chunk
|
||||
syncReceiveCount.Inc(1) |
||||
chunk = storage.NewChunk(req.Key, nil) |
||||
|
||||
case chunk.SData == nil: |
||||
// found chunk in memory store, needs the data, validate now
|
||||
log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req)) |
||||
|
||||
default: |
||||
// data is found, store request ignored
|
||||
// this should update access count?
|
||||
syncReceiveIgnore.Inc(1) |
||||
log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req)) |
||||
islocal = true |
||||
//return
|
||||
} |
||||
|
||||
hasher := self.hashfunc() |
||||
hasher.Write(req.SData) |
||||
if !bytes.Equal(hasher.Sum(nil), req.Key) { |
||||
// data does not validate, ignore
|
||||
// TODO: peer should be penalised/dropped?
|
||||
log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req)) |
||||
return |
||||
} |
||||
|
||||
if islocal { |
||||
return |
||||
} |
||||
// update chunk with size and data
|
||||
chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size + at least one byte of data)
|
||||
chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8])) |
||||
log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p)) |
||||
chunk.Source = p |
||||
self.netStore.Put(chunk) |
||||
} |
||||
|
||||
// entrypoint for retrieve requests coming from the bzz wire protocol
|
||||
// checks swap balance - return if peer has no credit
|
||||
func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) { |
||||
req.from = p |
||||
// swap - record credit for 1 request
|
||||
// note that only charge actual reqsearches
|
||||
var err error |
||||
if p.swap != nil { |
||||
err = p.swap.Add(1) |
||||
} |
||||
if err != nil { |
||||
log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err)) |
||||
return |
||||
} |
||||
|
||||
// call storage.NetStore#Get which
|
||||
// blocks until local retrieval finished
|
||||
// launches cloud retrieval
|
||||
chunk, _ := self.netStore.Get(req.Key) |
||||
req = self.strategyUpdateRequest(chunk.Req, req) |
||||
// check if we can immediately deliver
|
||||
if chunk.SData != nil { |
||||
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log())) |
||||
|
||||
if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size { |
||||
sreq := &storeRequestMsgData{ |
||||
Id: req.Id, |
||||
Key: chunk.Key, |
||||
SData: chunk.SData, |
||||
requestTimeout: req.timeout, //
|
||||
} |
||||
syncSendCount.Inc(1) |
||||
p.syncer.addRequest(sreq, DeliverReq) |
||||
} else { |
||||
syncSendRefused.Inc(1) |
||||
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log())) |
||||
} |
||||
} else { |
||||
syncSendNotFound.Inc(1) |
||||
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log())) |
||||
} |
||||
} |
||||
|
||||
// add peer request the chunk and decides the timeout for the response if still searching
|
||||
func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) { |
||||
log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log())) |
||||
// we do not create an alternative one
|
||||
req = origReq |
||||
if rs != nil { |
||||
self.addRequester(rs, req) |
||||
req.setTimeout(self.searchTimeout(rs, req)) |
||||
} |
||||
return |
||||
} |
||||
|
||||
// decides the timeout promise sent with the immediate peers response to a retrieve request
|
||||
// if timeout is explicitly set and expired
|
||||
func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) { |
||||
reqt := req.getTimeout() |
||||
t := time.Now().Add(searchTimeout) |
||||
if reqt != nil && reqt.Before(t) { |
||||
return reqt |
||||
} else { |
||||
return &t |
||||
} |
||||
} |
||||
|
||||
/* |
||||
adds a new peer to an existing open request |
||||
only add if less than requesterCount peers forwarded the same request id so far |
||||
note this is done irrespective of status (searching or found) |
||||
*/ |
||||
func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) { |
||||
log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id)) |
||||
list := rs.Requesters[req.Id] |
||||
rs.Requesters[req.Id] = append(list, req) |
||||
} |
@ -0,0 +1,210 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sync" |
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/pot" |
||||
) |
||||
|
||||
// discovery bzz extension for requesting and relaying node address records
|
||||
|
||||
// discPeer wraps BzzPeer and embeds an Overlay connectivity driver
|
||||
type discPeer struct { |
||||
*BzzPeer |
||||
overlay Overlay |
||||
sentPeers bool // whether we already sent peer closer to this address
|
||||
mtx sync.RWMutex |
||||
peers map[string]bool // tracks node records sent to the peer
|
||||
depth uint8 // the proximity order advertised by remote as depth of saturation
|
||||
} |
||||
|
||||
// newDiscovery constructs a discovery peer
|
||||
func newDiscovery(p *BzzPeer, o Overlay) *discPeer { |
||||
d := &discPeer{ |
||||
overlay: o, |
||||
BzzPeer: p, |
||||
peers: make(map[string]bool), |
||||
} |
||||
// record remote as seen so we never send a peer its own record
|
||||
d.seen(d) |
||||
return d |
||||
} |
||||
|
||||
// HandleMsg is the message handler that delegates incoming messages
|
||||
func (d *discPeer) HandleMsg(msg interface{}) error { |
||||
switch msg := msg.(type) { |
||||
|
||||
case *peersMsg: |
||||
return d.handlePeersMsg(msg) |
||||
|
||||
case *subPeersMsg: |
||||
return d.handleSubPeersMsg(msg) |
||||
|
||||
default: |
||||
return fmt.Errorf("unknown message type: %T", msg) |
||||
} |
||||
} |
||||
|
||||
// NotifyDepth sends a message to all connections if depth of saturation is changed
|
||||
func NotifyDepth(depth uint8, h Overlay) { |
||||
f := func(val OverlayConn, po int, _ bool) bool { |
||||
dp, ok := val.(*discPeer) |
||||
if ok { |
||||
dp.NotifyDepth(depth) |
||||
} |
||||
return true |
||||
} |
||||
h.EachConn(nil, 255, f) |
||||
} |
||||
|
||||
// NotifyPeer informs all peers about a newly added node
|
||||
func NotifyPeer(p OverlayAddr, k Overlay) { |
||||
f := func(val OverlayConn, po int, _ bool) bool { |
||||
dp, ok := val.(*discPeer) |
||||
if ok { |
||||
dp.NotifyPeer(p, uint8(po)) |
||||
} |
||||
return true |
||||
} |
||||
k.EachConn(p.Address(), 255, f) |
||||
} |
||||
|
||||
// NotifyPeer notifies the remote node (recipient) about a peer if
|
||||
// the peer's PO is within the recipients advertised depth
|
||||
// OR the peer is closer to the recipient than self
|
||||
// unless already notified during the connection session
|
||||
func (d *discPeer) NotifyPeer(a OverlayAddr, po uint8) { |
||||
// immediately return
|
||||
if (po < d.getDepth() && pot.ProxCmp(d.localAddr, d, a) != 1) || d.seen(a) { |
||||
return |
||||
} |
||||
// log.Trace(fmt.Sprintf("%08x peer %08x notified of peer %08x", d.localAddr.Over()[:4], d.Address()[:4], a.Address()[:4]))
|
||||
resp := &peersMsg{ |
||||
Peers: []*BzzAddr{ToAddr(a)}, |
||||
} |
||||
go d.Send(resp) |
||||
} |
||||
|
||||
// NotifyDepth sends a subPeers Msg to the receiver notifying them about
|
||||
// a change in the depth of saturation
|
||||
func (d *discPeer) NotifyDepth(po uint8) { |
||||
// log.Trace(fmt.Sprintf("%08x peer %08x notified of new depth %v", d.localAddr.Over()[:4], d.Address()[:4], po))
|
||||
go d.Send(&subPeersMsg{Depth: po}) |
||||
} |
||||
|
||||
/* |
||||
peersMsg is the message to pass peer information |
||||
It is always a response to a peersRequestMsg |
||||
|
||||
The encoding of a peer address is identical to the devp2p base protocol peers |
||||
messages: [IP, Port, NodeID], |
||||
Note that a node's FileStore address is not the NodeID but the hash of the NodeID. |
||||
|
||||
TODO: |
||||
To mitigate against spurious peers messages, requests should be remembered |
||||
and correctness of responses should be checked |
||||
|
||||
If the proxBin of peers in the response is incorrect the sender should be |
||||
disconnected |
||||
*/ |
||||
|
||||
// peersMsg encapsulates an array of peer addresses
|
||||
// used for communicating about known peers
|
||||
// relevant for bootstrapping connectivity and updating peersets
|
||||
type peersMsg struct { |
||||
Peers []*BzzAddr |
||||
} |
||||
|
||||
// String pretty prints a peersMsg
|
||||
func (msg peersMsg) String() string { |
||||
return fmt.Sprintf("%T: %v", msg, msg.Peers) |
||||
} |
||||
|
||||
// handlePeersMsg called by the protocol when receiving peerset (for target address)
|
||||
// list of nodes ([]PeerAddr in peersMsg) is added to the overlay db using the
|
||||
// Register interface method
|
||||
func (d *discPeer) handlePeersMsg(msg *peersMsg) error { |
||||
// register all addresses
|
||||
if len(msg.Peers) == 0 { |
||||
return nil |
||||
} |
||||
|
||||
for _, a := range msg.Peers { |
||||
d.seen(a) |
||||
NotifyPeer(a, d.overlay) |
||||
} |
||||
return d.overlay.Register(toOverlayAddrs(msg.Peers...)) |
||||
} |
||||
|
||||
// subPeers msg is communicating the depth/sharpness/focus of the overlay table of a peer
|
||||
type subPeersMsg struct { |
||||
Depth uint8 |
||||
} |
||||
|
||||
// String returns the pretty printer
|
||||
func (msg subPeersMsg) String() string { |
||||
return fmt.Sprintf("%T: request peers > PO%02d. ", msg, msg.Depth) |
||||
} |
||||
|
||||
func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error { |
||||
if !d.sentPeers { |
||||
d.setDepth(msg.Depth) |
||||
var peers []*BzzAddr |
||||
d.overlay.EachConn(d.Over(), 255, func(p OverlayConn, po int, isproxbin bool) bool { |
||||
if pob, _ := pof(d, d.localAddr, 0); pob > po { |
||||
return false |
||||
} |
||||
if !d.seen(p) { |
||||
peers = append(peers, ToAddr(p.Off())) |
||||
} |
||||
return true |
||||
}) |
||||
if len(peers) > 0 { |
||||
// log.Debug(fmt.Sprintf("%08x: %v peers sent to %v", d.overlay.BaseAddr(), len(peers), d))
|
||||
go d.Send(&peersMsg{Peers: peers}) |
||||
} |
||||
} |
||||
d.sentPeers = true |
||||
return nil |
||||
} |
||||
|
||||
// seen takes an Overlay peer and checks if it was sent to a peer already
|
||||
// if not, marks the peer as sent
|
||||
func (d *discPeer) seen(p OverlayPeer) bool { |
||||
d.mtx.Lock() |
||||
defer d.mtx.Unlock() |
||||
k := string(p.Address()) |
||||
if d.peers[k] { |
||||
return true |
||||
} |
||||
d.peers[k] = true |
||||
return false |
||||
} |
||||
|
||||
func (d *discPeer) getDepth() uint8 { |
||||
d.mtx.RLock() |
||||
defer d.mtx.RUnlock() |
||||
return d.depth |
||||
} |
||||
func (d *discPeer) setDepth(depth uint8) { |
||||
d.mtx.Lock() |
||||
defer d.mtx.Unlock() |
||||
d.depth = depth |
||||
} |
@ -0,0 +1,57 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"testing" |
||||
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" |
||||
) |
||||
|
||||
/*** |
||||
* |
||||
* - after connect, that outgoing subpeersmsg is sent |
||||
* |
||||
*/ |
||||
func TestDiscovery(t *testing.T) { |
||||
params := NewHiveParams() |
||||
s, pp := newHiveTester(t, params, 1, nil) |
||||
|
||||
id := s.IDs[0] |
||||
raddr := NewAddrFromNodeID(id) |
||||
pp.Register([]OverlayAddr{OverlayAddr(raddr)}) |
||||
|
||||
// start the hive and wait for the connection
|
||||
pp.Start(s.Server) |
||||
defer pp.Stop() |
||||
|
||||
// send subPeersMsg to the peer
|
||||
err := s.TestExchanges(p2ptest.Exchange{ |
||||
Label: "outgoing subPeersMsg", |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &subPeersMsg{Depth: 0}, |
||||
Peer: id, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
@ -1,150 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/rand" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
const requesterCount = 3 |
||||
|
||||
/* |
||||
forwarder implements the CloudStore interface (use by storage.NetStore) |
||||
and serves as the cloud store backend orchestrating storage/retrieval/delivery |
||||
via the native bzz protocol |
||||
which uses an MSB logarithmic distance-based semi-permanent Kademlia table for |
||||
* recursive forwarding style routing for retrieval |
||||
* smart syncronisation |
||||
*/ |
||||
|
||||
type forwarder struct { |
||||
hive *Hive |
||||
} |
||||
|
||||
func NewForwarder(hive *Hive) *forwarder { |
||||
return &forwarder{hive: hive} |
||||
} |
||||
|
||||
// generate a unique id uint64
|
||||
func generateId() uint64 { |
||||
r := rand.New(rand.NewSource(time.Now().UnixNano())) |
||||
return uint64(r.Int63()) |
||||
} |
||||
|
||||
var searchTimeout = 3 * time.Second |
||||
|
||||
// forwarding logic
|
||||
// logic propagating retrieve requests to peers given by the kademlia hive
|
||||
func (self *forwarder) Retrieve(chunk *storage.Chunk) { |
||||
peers := self.hive.getPeers(chunk.Key, 0) |
||||
log.Trace(fmt.Sprintf("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers))) |
||||
OUT: |
||||
for _, p := range peers { |
||||
log.Trace(fmt.Sprintf("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p)) |
||||
for _, recipients := range chunk.Req.Requesters { |
||||
for _, recipient := range recipients { |
||||
req := recipient.(*retrieveRequestMsgData) |
||||
if req.from.Addr() == p.Addr() { |
||||
continue OUT |
||||
} |
||||
} |
||||
} |
||||
req := &retrieveRequestMsgData{ |
||||
Key: chunk.Key, |
||||
Id: generateId(), |
||||
} |
||||
var err error |
||||
if p.swap != nil { |
||||
err = p.swap.Add(-1) |
||||
} |
||||
if err == nil { |
||||
p.retrieve(req) |
||||
break OUT |
||||
} |
||||
log.Warn(fmt.Sprintf("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err)) |
||||
} |
||||
} |
||||
|
||||
// requests to specific peers given by the kademlia hive
|
||||
// except for peers that the store request came from (if any)
|
||||
// delivery queueing taken care of by syncer
|
||||
func (self *forwarder) Store(chunk *storage.Chunk) { |
||||
var n int |
||||
msg := &storeRequestMsgData{ |
||||
Key: chunk.Key, |
||||
SData: chunk.SData, |
||||
} |
||||
var source *peer |
||||
if chunk.Source != nil { |
||||
source = chunk.Source.(*peer) |
||||
} |
||||
for _, p := range self.hive.getPeers(chunk.Key, 0) { |
||||
log.Trace(fmt.Sprintf("forwarder.Store: %v %v", p, chunk)) |
||||
|
||||
if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) { |
||||
n++ |
||||
Deliver(p, msg, PropagateReq) |
||||
} |
||||
} |
||||
log.Trace(fmt.Sprintf("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk)) |
||||
} |
||||
|
||||
// once a chunk is found deliver it to its requesters unless timed out
|
||||
func (self *forwarder) Deliver(chunk *storage.Chunk) { |
||||
// iterate over request entries
|
||||
for id, requesters := range chunk.Req.Requesters { |
||||
counter := requesterCount |
||||
msg := &storeRequestMsgData{ |
||||
Key: chunk.Key, |
||||
SData: chunk.SData, |
||||
} |
||||
var n int |
||||
var req *retrieveRequestMsgData |
||||
// iterate over requesters with the same id
|
||||
for id, r := range requesters { |
||||
req = r.(*retrieveRequestMsgData) |
||||
if req.timeout == nil || req.timeout.After(time.Now()) { |
||||
log.Trace(fmt.Sprintf("forwarder.Deliver: %v -> %v", req.Id, req.from)) |
||||
msg.Id = uint64(id) |
||||
Deliver(req.from, msg, DeliverReq) |
||||
n++ |
||||
counter-- |
||||
if counter <= 0 { |
||||
break |
||||
} |
||||
} |
||||
} |
||||
log.Trace(fmt.Sprintf("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n)) |
||||
} |
||||
} |
||||
|
||||
// initiate delivery of a chunk to a particular peer via syncer#addRequest
|
||||
// depending on syncer mode and priority settings and sync request type
|
||||
// this either goes via confirmation roundtrip or queued or pushed directly
|
||||
func Deliver(p *peer, req interface{}, ty int) { |
||||
p.syncer.addRequest(req, ty) |
||||
} |
||||
|
||||
// push chunk over to peer
|
||||
func Push(p *peer, key storage.Key, priority uint) { |
||||
p.syncer.doDelivery(key, priority, p.syncer.quit) |
||||
} |
@ -0,0 +1,108 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"io/ioutil" |
||||
"log" |
||||
"os" |
||||
"testing" |
||||
|
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" |
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
) |
||||
|
||||
func newHiveTester(t *testing.T, params *HiveParams, n int, store state.Store) (*bzzTester, *Hive) { |
||||
// setup
|
||||
addr := RandomAddr() // tested peers peer address
|
||||
to := NewKademlia(addr.OAddr, NewKadParams()) |
||||
pp := NewHive(params, to, store) // hive
|
||||
|
||||
return newBzzBaseTester(t, n, addr, DiscoverySpec, pp.Run), pp |
||||
} |
||||
|
||||
func TestRegisterAndConnect(t *testing.T) { |
||||
params := NewHiveParams() |
||||
s, pp := newHiveTester(t, params, 1, nil) |
||||
|
||||
id := s.IDs[0] |
||||
raddr := NewAddrFromNodeID(id) |
||||
pp.Register([]OverlayAddr{OverlayAddr(raddr)}) |
||||
|
||||
// start the hive and wait for the connection
|
||||
err := pp.Start(s.Server) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer pp.Stop() |
||||
// retrieve and broadcast
|
||||
err = s.TestDisconnected(&p2ptest.Disconnect{ |
||||
Peer: s.IDs[0], |
||||
Error: nil, |
||||
}) |
||||
|
||||
if err == nil || err.Error() != "timed out waiting for peers to disconnect" { |
||||
t.Fatalf("expected peer to connect") |
||||
} |
||||
} |
||||
|
||||
func TestHiveStatePersistance(t *testing.T) { |
||||
log.SetOutput(os.Stdout) |
||||
|
||||
dir, err := ioutil.TempDir("", "hive_test_store") |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
defer os.RemoveAll(dir) |
||||
|
||||
store, err := state.NewDBStore(dir) //start the hive with an empty dbstore
|
||||
|
||||
params := NewHiveParams() |
||||
s, pp := newHiveTester(t, params, 5, store) |
||||
|
||||
peers := make(map[string]bool) |
||||
for _, id := range s.IDs { |
||||
raddr := NewAddrFromNodeID(id) |
||||
pp.Register([]OverlayAddr{OverlayAddr(raddr)}) |
||||
peers[raddr.String()] = true |
||||
} |
||||
|
||||
// start the hive and wait for the connection
|
||||
err = pp.Start(s.Server) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
pp.Stop() |
||||
store.Close() |
||||
|
||||
persistedStore, err := state.NewDBStore(dir) //reopen the persisted dbstore
|
||||
|
||||
s1, pp := newHiveTester(t, params, 1, persistedStore) |
||||
|
||||
//start the hive and wait for the connection
|
||||
|
||||
pp.Start(s1.Server) |
||||
i := 0 |
||||
pp.Overlay.EachAddr(nil, 256, func(addr OverlayAddr, po int, nn bool) bool { |
||||
delete(peers, addr.(*BzzAddr).String()) |
||||
i++ |
||||
return true |
||||
}) |
||||
if len(peers) != 0 || i != 5 { |
||||
t.Fatalf("invalid peers loaded") |
||||
} |
||||
} |
@ -0,0 +1,765 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"math/rand" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/pot" |
||||
) |
||||
|
||||
/* |
||||
|
||||
Taking the proximity order relative to a fix point x classifies the points in |
||||
the space (n byte long byte sequences) into bins. Items in each are at |
||||
most half as distant from x as items in the previous bin. Given a sample of |
||||
uniformly distributed items (a hash function over arbitrary sequence) the |
||||
proximity scale maps onto series of subsets with cardinalities on a negative |
||||
exponential scale. |
||||
|
||||
It also has the property that any two item belonging to the same bin are at |
||||
most half as distant from each other as they are from x. |
||||
|
||||
If we think of random sample of items in the bins as connections in a network of |
||||
interconnected nodes then relative proximity can serve as the basis for local |
||||
decisions for graph traversal where the task is to find a route between two |
||||
points. Since in every hop, the finite distance halves, there is |
||||
a guaranteed constant maximum limit on the number of hops needed to reach one |
||||
node from the other. |
||||
*/ |
||||
|
||||
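// For example, with 256-bit addresses, an address that shares its first 5
// bits with the base address but differs in the 6th has proximity order 5
// relative to it; the higher the order, the closer the address. Addresses
// whose proximity order relative to the base is at least the current depth
// form the nearest-neighbour set.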
var pof = pot.DefaultPof(256) |
||||
|
||||
// KadParams holds the config params for Kademlia
|
||||
type KadParams struct { |
||||
// adjustable parameters
|
||||
MaxProxDisplay int // number of rows the table shows
|
||||
MinProxBinSize int // nearest neighbour core minimum cardinality
|
||||
MinBinSize int // minimum number of peers in a row
|
||||
MaxBinSize int // maximum number of peers in a row before pruning
|
||||
RetryInterval int64 // initial interval before a peer is first redialed
|
||||
RetryExponent int // exponent to multiply retry intervals with
|
||||
MaxRetries int // maximum number of redial attempts
|
||||
// function to sanction or prevent suggesting a peer
|
||||
Reachable func(OverlayAddr) bool |
||||
} |
||||
|
||||
// NewKadParams returns a params struct with default values
|
||||
func NewKadParams() *KadParams { |
||||
return &KadParams{ |
||||
MaxProxDisplay: 16, |
||||
MinProxBinSize: 2, |
||||
MinBinSize: 2, |
||||
MaxBinSize: 4, |
||||
RetryInterval: 4200000000, // 4.2 sec
|
||||
MaxRetries: 42, |
||||
RetryExponent: 2, |
||||
} |
||||
} |
||||
|
||||
// Kademlia is a table of live peers and a db of known peers (node records)
|
||||
type Kademlia struct { |
||||
lock sync.RWMutex |
||||
*KadParams // Kademlia configuration parameters
|
||||
base []byte // immutable base address of the table
|
||||
addrs *pot.Pot // pots container for known peer addresses
|
||||
conns *pot.Pot // pots container for live peer connections
|
||||
depth uint8 // stores the last current depth of saturation
|
||||
nDepth int // stores the last neighbourhood depth
|
||||
nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
|
||||
addrCountC chan int // returned by AddrCountC function to signal peer count change
|
||||
} |
||||
|
||||
// NewKademlia creates a Kademlia table for base address addr
|
||||
// with parameters as in params
|
||||
// if params is nil, it uses default values
|
||||
func NewKademlia(addr []byte, params *KadParams) *Kademlia { |
||||
if params == nil { |
||||
params = NewKadParams() |
||||
} |
||||
return &Kademlia{ |
||||
base: addr, |
||||
KadParams: params, |
||||
addrs: pot.NewPot(nil, 0), |
||||
conns: pot.NewPot(nil, 0), |
||||
} |
||||
} |
||||
|
||||
// OverlayPeer interface captures the common aspect of view of a peer from the Overlay
|
||||
// topology driver
|
||||
type OverlayPeer interface { |
||||
Address() []byte |
||||
} |
||||
|
||||
// OverlayConn represents a connected peer
|
||||
type OverlayConn interface { |
||||
OverlayPeer |
||||
Drop(error) // call to indicate a peer should be expunged
|
||||
Off() OverlayAddr // call to return a persistent OverlayAddr
|
||||
} |
||||
|
||||
// OverlayAddr represents a kademlia peer record
|
||||
type OverlayAddr interface { |
||||
OverlayPeer |
||||
Update(OverlayAddr) OverlayAddr // returns the updated version of the original
|
||||
} |
||||
|
||||
// entry represents a Kademlia table entry (an extension of OverlayPeer)
|
||||
type entry struct { |
||||
OverlayPeer |
||||
seenAt time.Time |
||||
retries int |
||||
} |
||||
|
||||
// newEntry creates a kademlia peer from an OverlayPeer interface
|
||||
func newEntry(p OverlayPeer) *entry { |
||||
return &entry{ |
||||
OverlayPeer: p, |
||||
seenAt: time.Now(), |
||||
} |
||||
} |
||||
|
||||
// Bin is the binary (bitvector) serialisation of the entry address
|
||||
func (e *entry) Bin() string { |
||||
return pot.ToBin(e.addr().Address()) |
||||
} |
||||
|
||||
// Label is a short tag for the entry for debug
|
||||
func Label(e *entry) string { |
||||
return fmt.Sprintf("%s (%d)", e.Hex()[:4], e.retries) |
||||
} |
||||
|
||||
// Hex is the hexadecimal serialisation of the entry address
|
||||
func (e *entry) Hex() string { |
||||
return fmt.Sprintf("%x", e.addr().Address()) |
||||
} |
||||
|
||||
// String is the short tag for the entry
|
||||
func (e *entry) String() string { |
||||
return fmt.Sprintf("%s (%d)", e.Hex()[:8], e.retries) |
||||
} |
||||
|
||||
// addr returns the kad peer record (OverlayAddr) corresponding to the entry
|
||||
func (e *entry) addr() OverlayAddr { |
||||
a, _ := e.OverlayPeer.(OverlayAddr) |
||||
return a |
||||
} |
||||
|
||||
// conn returns the connected peer (OverlayPeer) corresponding to the entry
|
||||
func (e *entry) conn() OverlayConn { |
||||
c, _ := e.OverlayPeer.(OverlayConn) |
||||
return c |
||||
} |
||||
|
||||
// Register enters each OverlayAddr as kademlia peer record into the
|
||||
// database of known peer addresses
|
||||
func (k *Kademlia) Register(peers []OverlayAddr) error { |
||||
k.lock.Lock() |
||||
defer k.lock.Unlock() |
||||
var known, size int |
||||
for _, p := range peers { |
||||
// error if self received, peer should know better
|
||||
// and should be punished for this
|
||||
if bytes.Equal(p.Address(), k.base) { |
||||
return fmt.Errorf("add peers: %x is self", k.base) |
||||
} |
||||
var found bool |
||||
k.addrs, _, found, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { |
||||
// if not found
|
||||
if v == nil { |
||||
// insert new offline peer into conns
|
||||
return newEntry(p) |
||||
} |
||||
// found among known peers, do nothing
|
||||
return v |
||||
}) |
||||
if found { |
||||
known++ |
||||
} |
||||
size++ |
||||
} |
||||
// send new address count value only if there are new addresses
|
||||
if k.addrCountC != nil && size-known > 0 { |
||||
k.addrCountC <- k.addrs.Size() |
||||
} |
||||
// log.Trace(fmt.Sprintf("%x registered %v peers, %v known, total: %v", k.BaseAddr()[:4], size, known, k.addrs.Size()))
|
||||
|
||||
k.sendNeighbourhoodDepthChange() |
||||
return nil |
||||
} |
||||
|
||||
// SuggestPeer returns a known peer for the lowest proximity bin for the
|
||||
// lowest bincount below depth
|
||||
// naturally if there is an empty row it returns a peer for that
|
||||
func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) { |
||||
k.lock.Lock() |
||||
defer k.lock.Unlock() |
||||
minsize := k.MinBinSize |
||||
depth := k.neighbourhoodDepth() |
||||
// if there is a callable neighbour within the current proxBin, connect
|
||||
// this makes sure nearest neighbour set is fully connected
|
||||
var ppo int |
||||
k.addrs.EachNeighbour(k.base, pof, func(val pot.Val, po int) bool { |
||||
if po < depth { |
||||
return false |
||||
} |
||||
a = k.callable(val) |
||||
ppo = po |
||||
return a == nil |
||||
}) |
||||
if a != nil { |
||||
log.Trace(fmt.Sprintf("%08x candidate nearest neighbour found: %v (%v)", k.BaseAddr()[:4], a, ppo)) |
||||
return a, 0, false |
||||
} |
||||
// log.Trace(fmt.Sprintf("%08x no candidate nearest neighbours to connect to (Depth: %v, minProxSize: %v) %#v", k.BaseAddr()[:4], depth, k.MinProxBinSize, a))
|
||||
|
||||
var bpo []int |
||||
prev := -1 |
||||
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { |
||||
prev++ |
||||
for ; prev < po; prev++ { |
||||
bpo = append(bpo, prev) |
||||
minsize = 0 |
||||
} |
||||
if size < minsize { |
||||
bpo = append(bpo, po) |
||||
minsize = size |
||||
} |
||||
return size > 0 && po < depth |
||||
}) |
||||
// all buckets are full, ie., minsize == k.MinBinSize
|
||||
if len(bpo) == 0 { |
||||
// log.Debug(fmt.Sprintf("%08x: all bins saturated", k.BaseAddr()[:4]))
|
||||
return nil, 0, false |
||||
} |
||||
// as long as we got candidate peers to connect to
|
||||
// dont ask for new peers (want = false)
|
||||
// try to select a candidate peer
|
||||
// find the first callable peer
|
||||
nxt := bpo[0] |
||||
k.addrs.EachBin(k.base, pof, nxt, func(po, _ int, f func(func(pot.Val, int) bool) bool) bool { |
||||
// for each bin (up until depth) we find callable candidate peers
|
||||
if po >= depth { |
||||
return false |
||||
} |
||||
return f(func(val pot.Val, _ int) bool { |
||||
a = k.callable(val) |
||||
return a == nil |
||||
}) |
||||
}) |
||||
// found a candidate
|
||||
if a != nil { |
||||
return a, 0, false |
||||
} |
||||
// no candidate peer found, request for the short bin
|
||||
var changed bool |
||||
if uint8(nxt) < k.depth { |
||||
k.depth = uint8(nxt) |
||||
changed = true |
||||
} |
||||
return a, nxt, changed |
||||
} |
||||
|
||||
// On inserts the peer as a kademlia peer into the live peers
|
||||
func (k *Kademlia) On(p OverlayConn) (uint8, bool) { |
||||
k.lock.Lock() |
||||
defer k.lock.Unlock() |
||||
e := newEntry(p) |
||||
var ins bool |
||||
k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(v pot.Val) pot.Val { |
||||
// if not found live
|
||||
if v == nil { |
||||
ins = true |
||||
// insert new online peer into conns
|
||||
return e |
||||
} |
||||
// found among live peers, do nothing
|
||||
return v |
||||
}) |
||||
if ins { |
||||
// insert new online peer into addrs
|
||||
k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { |
||||
return e |
||||
}) |
||||
// send new address count value only if the peer is inserted
|
||||
if k.addrCountC != nil { |
||||
k.addrCountC <- k.addrs.Size() |
||||
} |
||||
} |
||||
log.Trace(k.string()) |
||||
// calculate if depth of saturation changed
|
||||
depth := uint8(k.saturation(k.MinBinSize)) |
||||
var changed bool |
||||
if depth != k.depth { |
||||
changed = true |
||||
k.depth = depth |
||||
} |
||||
k.sendNeighbourhoodDepthChange() |
||||
return k.depth, changed |
||||
} |
||||
|
||||
// NeighbourhoodDepthC returns the channel that sends a new kademlia
|
||||
// neighbourhood depth on each change.
|
||||
// Not receiving from the returned channel will block On function
|
||||
// when the neighbourhood depth is changed.
|
||||
func (k *Kademlia) NeighbourhoodDepthC() <-chan int { |
||||
if k.nDepthC == nil { |
||||
k.nDepthC = make(chan int) |
||||
} |
||||
return k.nDepthC |
||||
} |
||||
|
||||
// sendNeighbourhoodDepthChange sends new neighbourhood depth to k.nDepth channel
|
||||
// if it is initialized.
|
||||
func (k *Kademlia) sendNeighbourhoodDepthChange() { |
||||
// nDepthC is initialized when NeighbourhoodDepthC is called and returned by it.
|
||||
// It provides signaling of neighbourhood depth change.
|
||||
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
|
||||
if k.nDepthC != nil { |
||||
nDepth := k.neighbourhoodDepth() |
||||
if nDepth != k.nDepth { |
||||
k.nDepth = nDepth |
||||
k.nDepthC <- nDepth |
||||
} |
||||
} |
||||
} |
||||
|
||||
// AddrCountC returns the channel that sends a new
|
||||
// address count value on each change.
|
||||
// Not receiving from the returned channel will block Register function
|
||||
// when address count value changes.
|
||||
func (k *Kademlia) AddrCountC() <-chan int { |
||||
if k.addrCountC == nil { |
||||
k.addrCountC = make(chan int) |
||||
} |
||||
return k.addrCountC |
||||
} |
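
// depthAndCountConsumerExample is an illustrative sketch (not part of the original
// file, names are hypothetical) of how the notification channels above are meant to
// be consumed: both channels are unbuffered, so a goroutine must keep receiving from
// them, otherwise On (for depth changes) and Register (for address count changes)
// will block.
func depthAndCountConsumerExample(k *Kademlia, quit chan struct{}) {
	depthC := k.NeighbourhoodDepthC()
	addrCountC := k.AddrCountC()
	go func() {
		for {
			select {
			case d := <-depthC:
				log.Trace(fmt.Sprintf("neighbourhood depth changed to %d", d))
			case c := <-addrCountC:
				log.Trace(fmt.Sprintf("known address count changed to %d", c))
			case <-quit:
				return
			}
		}
	}()
}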
||||
|
||||
// Off removes a peer from among live peers
|
||||
func (k *Kademlia) Off(p OverlayConn) { |
||||
k.lock.Lock() |
||||
defer k.lock.Unlock() |
||||
var del bool |
||||
k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val { |
||||
// v cannot be nil, must check otherwise we overwrite entry
|
||||
if v == nil { |
||||
panic(fmt.Sprintf("connected peer not found %v", p)) |
||||
} |
||||
del = true |
||||
return newEntry(p.Off()) |
||||
}) |
||||
|
||||
if del { |
||||
k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(_ pot.Val) pot.Val { |
||||
// v cannot be nil, but no need to check
|
||||
return nil |
||||
}) |
||||
// send new address count value only if the peer is deleted
|
||||
if k.addrCountC != nil { |
||||
k.addrCountC <- k.addrs.Size() |
||||
} |
||||
k.sendNeighbourhoodDepthChange() |
||||
} |
||||
} |
||||
|
||||
func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn OverlayConn, po int) bool) { |
||||
k.lock.RLock() |
||||
defer k.lock.RUnlock() |
||||
|
||||
var startPo int |
||||
var endPo int |
||||
kadDepth := k.neighbourhoodDepth() |
||||
|
||||
k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { |
||||
if startPo > 0 && endPo != k.MaxProxDisplay { |
||||
startPo = endPo + 1 |
||||
} |
||||
if po < kadDepth { |
||||
endPo = po |
||||
} else { |
||||
endPo = k.MaxProxDisplay |
||||
} |
||||
|
||||
for bin := startPo; bin <= endPo; bin++ { |
||||
f(func(val pot.Val, _ int) bool { |
||||
return eachBinFunc(val.(*entry).conn(), bin) |
||||
}) |
||||
} |
||||
return true |
||||
}) |
||||
} |
||||
|
||||
// EachConn is an iterator with args (base, po, f) that applies f to each live peer
// that has proximity order po or less as measured from the base.
// If base is nil, the kademlia base address is used.
|
||||
func (k *Kademlia) EachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) { |
||||
k.lock.RLock() |
||||
defer k.lock.RUnlock() |
||||
k.eachConn(base, o, f) |
||||
} |
||||
|
||||
func (k *Kademlia) eachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) { |
||||
if len(base) == 0 { |
||||
base = k.base |
||||
} |
||||
depth := k.neighbourhoodDepth() |
||||
k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool { |
||||
if po > o { |
||||
return true |
||||
} |
||||
return f(val.(*entry).conn(), po, po >= depth) |
||||
}) |
||||
} |
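
// nearestConnsExample is an illustrative sketch (not part of the original file, the
// name is hypothetical) showing a typical use of the EachConn iterator above: collect
// up to max live peers, ordered by proximity to base (or to the kademlia base address
// if base is nil).
func nearestConnsExample(k *Kademlia, base []byte, max int) (peers []OverlayConn) {
	k.EachConn(base, 255, func(p OverlayConn, po int, isNearestNeighbour bool) bool {
		peers = append(peers, p)
		// returning false stops the iteration once enough peers were collected
		return len(peers) < max
	})
	return peers
}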
||||
|
||||
// EachAddr called with (base, po, f) is an iterator applying f to each known peer
// that has proximity order po or less as measured from the base.
// If base is nil, the kademlia base address is used.
|
||||
func (k *Kademlia) EachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) { |
||||
k.lock.RLock() |
||||
defer k.lock.RUnlock() |
||||
k.eachAddr(base, o, f) |
||||
} |
||||
|
||||
func (k *Kademlia) eachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) { |
||||
if len(base) == 0 { |
||||
base = k.base |
||||
} |
||||
depth := k.neighbourhoodDepth() |
||||
k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool { |
||||
if po > o { |
||||
return true |
||||
} |
||||
return f(val.(*entry).addr(), po, po >= depth) |
||||
}) |
||||
} |
||||
|
||||
// neighbourhoodDepth returns the proximity order that defines the distance of
// the nearest neighbour set with cardinality >= MinProxBinSize.
// If there are fewer than MinProxBinSize peers altogether, it returns 0.
// The caller must hold the lock.
|
||||
func (k *Kademlia) neighbourhoodDepth() (depth int) { |
||||
if k.conns.Size() < k.MinProxBinSize { |
||||
return 0 |
||||
} |
||||
var size int |
||||
f := func(v pot.Val, i int) bool { |
||||
size++ |
||||
depth = i |
||||
return size < k.MinProxBinSize |
||||
} |
||||
k.conns.EachNeighbour(k.base, pof, f) |
||||
return depth |
||||
} |
||||
|
||||
// callable checks whether the peer entry val can be suggested for connection; it returns the peer's address if so, nil otherwise
|
||||
func (k *Kademlia) callable(val pot.Val) OverlayAddr { |
||||
e := val.(*entry) |
||||
// not callable if peer is live or exceeded maxRetries
|
||||
if e.conn() != nil || e.retries > k.MaxRetries { |
||||
return nil |
||||
} |
||||
// calculate the allowed number of retries based on time elapsed since last seen
|
||||
timeAgo := int64(time.Since(e.seenAt)) |
||||
div := int64(k.RetryExponent) |
||||
div += (150000 - rand.Int63n(300000)) * div / 1000000 |
||||
var retries int |
||||
for delta := timeAgo; delta > k.RetryInterval; delta /= div { |
||||
retries++ |
||||
} |
||||
// this is never called concurrently, so safe to increment
|
||||
// peer can be retried again
|
||||
if retries < e.retries { |
||||
log.Trace(fmt.Sprintf("%08x: %v long time since last try (at %v) needed before retry %v, wait only warrants %v", k.BaseAddr()[:4], e, timeAgo, e.retries, retries)) |
||||
return nil |
||||
} |
||||
// the Reachable function can be used to sanction or prevent suggesting a peer
|
||||
if k.Reachable != nil && !k.Reachable(e.addr()) { |
||||
log.Trace(fmt.Sprintf("%08x: peer %v is temporarily not callable", k.BaseAddr()[:4], e)) |
||||
return nil |
||||
} |
||||
e.retries++ |
||||
log.Trace(fmt.Sprintf("%08x: peer %v is callable", k.BaseAddr()[:4], e)) |
||||
|
||||
return e.addr() |
||||
} |
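
// allowedRetriesExample is an illustrative sketch (not part of the original file, the
// name is hypothetical) of the retry budget used by callable above: each further retry
// requires roughly RetryExponent times more waiting since the peer was last seen (the
// real code above also adds roughly +/-15% random jitter, omitted here).
func allowedRetriesExample(sinceLastSeen, retryInterval time.Duration, retryExponent int) int {
	if retryExponent < 2 {
		// guard against a non-contracting divisor
		return 0
	}
	retries := 0
	for delta := int64(sinceLastSeen); delta > int64(retryInterval); delta /= int64(retryExponent) {
		retries++
	}
	return retries
}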
||||
|
||||
// BaseAddr returns the kademlia base address
|
||||
func (k *Kademlia) BaseAddr() []byte { |
||||
return k.base |
||||
} |
||||
|
||||
// String returns kademlia table + kaddb table displayed with ascii
|
||||
func (k *Kademlia) String() string { |
||||
k.lock.RLock() |
||||
defer k.lock.RUnlock() |
||||
return k.string() |
||||
} |
||||
|
||||
// string returns the kademlia table + kaddb table displayed with ascii (caller must hold the lock)
|
||||
func (k *Kademlia) string() string { |
||||
wsrow := " " |
||||
var rows []string |
||||
|
||||
rows = append(rows, "=========================================================================") |
||||
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3])) |
||||
rows = append(rows, fmt.Sprintf("population: %d (%d), MinProxBinSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.MinProxBinSize, k.MinBinSize, k.MaxBinSize)) |
||||
|
||||
liverows := make([]string, k.MaxProxDisplay) |
||||
peersrows := make([]string, k.MaxProxDisplay) |
||||
|
||||
depth := k.neighbourhoodDepth() |
||||
rest := k.conns.Size() |
||||
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { |
||||
var rowlen int |
||||
if po >= k.MaxProxDisplay { |
||||
po = k.MaxProxDisplay - 1 |
||||
} |
||||
row := []string{fmt.Sprintf("%2d", size)} |
||||
rest -= size |
||||
f(func(val pot.Val, vpo int) bool { |
||||
e := val.(*entry) |
||||
row = append(row, fmt.Sprintf("%x", e.Address()[:2])) |
||||
rowlen++ |
||||
return rowlen < 4 |
||||
}) |
||||
r := strings.Join(row, " ") |
||||
r = r + wsrow |
||||
liverows[po] = r[:31] |
||||
return true |
||||
}) |
||||
|
||||
k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { |
||||
var rowlen int |
||||
if po >= k.MaxProxDisplay { |
||||
po = k.MaxProxDisplay - 1 |
||||
} |
||||
if size < 0 { |
||||
panic("wtf") |
||||
} |
||||
row := []string{fmt.Sprintf("%2d", size)} |
||||
// we are displaying live peers too
|
||||
f(func(val pot.Val, vpo int) bool { |
||||
e := val.(*entry) |
||||
row = append(row, Label(e)) |
||||
rowlen++ |
||||
return rowlen < 4 |
||||
}) |
||||
peersrows[po] = strings.Join(row, " ") |
||||
return true |
||||
}) |
||||
|
||||
for i := 0; i < k.MaxProxDisplay; i++ { |
||||
if i == depth { |
||||
rows = append(rows, fmt.Sprintf("============ DEPTH: %d ==========================================", i)) |
||||
} |
||||
left := liverows[i] |
||||
right := peersrows[i] |
||||
if len(left) == 0 { |
||||
left = " 0 " |
||||
} |
||||
if len(right) == 0 { |
||||
right = " 0" |
||||
} |
||||
rows = append(rows, fmt.Sprintf("%03d %v | %v", i, left, right)) |
||||
} |
||||
rows = append(rows, "=========================================================================") |
||||
return "\n" + strings.Join(rows, "\n") |
||||
} |
||||
|
||||
// PeerPot keeps info about expected nearest neighbours and empty bins
|
||||
// used for testing only
|
||||
type PeerPot struct { |
||||
NNSet [][]byte |
||||
EmptyBins []int |
||||
} |
||||
|
||||
// NewPeerPotMap creates a map of PeerPot records for the given addresses, with keys
// as hexadecimal representations of the addresses.
|
||||
func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot { |
||||
// create a table of all nodes for health check
|
||||
np := pot.NewPot(nil, 0) |
||||
for _, addr := range addrs { |
||||
np, _, _ = pot.Add(np, addr, pof) |
||||
} |
||||
ppmap := make(map[string]*PeerPot) |
||||
|
||||
for i, a := range addrs { |
||||
pl := 256 |
||||
prev := 256 |
||||
var emptyBins []int |
||||
var nns [][]byte |
||||
np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool { |
||||
a := val.([]byte) |
||||
if po == 256 { |
||||
return true |
||||
} |
||||
if pl == 256 || pl == po { |
||||
nns = append(nns, a) |
||||
} |
||||
if pl == 256 && len(nns) >= kadMinProxSize { |
||||
pl = po |
||||
prev = po |
||||
} |
||||
if prev < pl { |
||||
for j := prev; j > po; j-- { |
||||
emptyBins = append(emptyBins, j) |
||||
} |
||||
} |
||||
prev = po - 1 |
||||
return true |
||||
}) |
||||
for j := prev; j >= 0; j-- { |
||||
emptyBins = append(emptyBins, j) |
||||
} |
||||
log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns))) |
||||
ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins} |
||||
} |
||||
return ppmap |
||||
} |
||||
|
||||
// saturation returns the lowest proximity order such that the bin for that order
// has fewer than n peers
|
||||
func (k *Kademlia) saturation(n int) int { |
||||
prev := -1 |
||||
k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool { |
||||
prev++ |
||||
return prev == po && size >= n |
||||
}) |
||||
depth := k.neighbourhoodDepth() |
||||
if depth < prev { |
||||
return depth |
||||
} |
||||
return prev |
||||
} |
||||
|
||||
// full returns true if all required bins have connected peers.
// It is used in the Healthy function.
|
||||
func (k *Kademlia) full(emptyBins []int) (full bool) { |
||||
prev := 0 |
||||
e := len(emptyBins) |
||||
ok := true |
||||
depth := k.neighbourhoodDepth() |
||||
k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool { |
||||
if prev == depth+1 { |
||||
return true |
||||
} |
||||
for i := prev; i < po; i++ { |
||||
e-- |
||||
if e < 0 { |
||||
ok = false |
||||
return false |
||||
} |
||||
if emptyBins[e] != i { |
||||
log.Trace(fmt.Sprintf("%08x po: %d, i: %d, e: %d, emptybins: %v", k.BaseAddr()[:4], po, i, e, logEmptyBins(emptyBins))) |
||||
if emptyBins[e] < i { |
||||
panic("incorrect peerpot") |
||||
} |
||||
ok = false |
||||
return false |
||||
} |
||||
} |
||||
prev = po + 1 |
||||
return true |
||||
}) |
||||
if !ok { |
||||
return false |
||||
} |
||||
return e == 0 |
||||
} |
||||
|
||||
func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool { |
||||
pm := make(map[string]bool) |
||||
|
||||
k.eachAddr(nil, 255, func(p OverlayAddr, po int, nn bool) bool { |
||||
if !nn { |
||||
return false |
||||
} |
||||
pk := fmt.Sprintf("%x", p.Address()) |
||||
pm[pk] = true |
||||
return true |
||||
}) |
||||
for _, p := range peers { |
||||
pk := fmt.Sprintf("%x", p) |
||||
if !pm[pk] { |
||||
log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.BaseAddr()[:4], pk[:8])) |
||||
return false |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) { |
||||
pm := make(map[string]bool) |
||||
|
||||
k.eachConn(nil, 255, func(p OverlayConn, po int, nn bool) bool { |
||||
if !nn { |
||||
return false |
||||
} |
||||
pk := fmt.Sprintf("%x", p.Address()) |
||||
pm[pk] = true |
||||
return true |
||||
}) |
||||
var gots int |
||||
var culprits [][]byte |
||||
for _, p := range peers { |
||||
pk := fmt.Sprintf("%x", p) |
||||
if pm[pk] { |
||||
gots++ |
||||
} else { |
||||
log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.BaseAddr()[:4], pk[:8])) |
||||
culprits = append(culprits, p) |
||||
} |
||||
} |
||||
return gots == len(peers), gots, culprits |
||||
} |
||||
|
||||
// Health state of the Kademlia
|
||||
type Health struct { |
||||
KnowNN bool // whether node knows all its nearest neighbours
|
||||
GotNN bool // whether node is connected to all its nearest neighbours
|
||||
CountNN int // number of nearest neighbours connected to
|
||||
CulpritsNN [][]byte // which known NNs are missing
|
||||
Full bool // whether node has a peer in each kademlia bin (where there is such a peer)
|
||||
Hive string |
||||
} |
||||
|
||||
// Healthy reports the health state of the kademlia connectivity
|
||||
// returns a Health struct
|
||||
func (k *Kademlia) Healthy(pp *PeerPot) *Health { |
||||
k.lock.RLock() |
||||
defer k.lock.RUnlock() |
||||
gotnn, countnn, culpritsnn := k.gotNearestNeighbours(pp.NNSet) |
||||
knownn := k.knowNearestNeighbours(pp.NNSet) |
||||
full := k.full(pp.EmptyBins) |
||||
log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, full: %v\n", k.BaseAddr()[:4], knownn, gotnn, full)) |
||||
return &Health{knownn, gotnn, countnn, culpritsnn, full, k.string()} |
||||
} |
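
// healthCheckExample is an illustrative sketch (not part of the original file, the name
// is hypothetical) of how the testing helpers above fit together: build the expected
// PeerPot map for all known addresses with NewPeerPotMap, then ask each kademlia whether
// it is healthy with respect to its own entry. The kads map is assumed to be keyed by
// the hexadecimal form of each node's base address, matching the keys of the PeerPot map.
func healthCheckExample(kads map[string]*Kademlia, addrs [][]byte, minProxBinSize int) bool {
	ppmap := NewPeerPotMap(minProxBinSize, addrs)
	for key, k := range kads {
		h := k.Healthy(ppmap[key])
		if !h.KnowNN || !h.GotNN || !h.Full {
			return false
		}
	}
	return true
}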
||||
|
||||
func logEmptyBins(ebs []int) string { |
||||
var ebss []string |
||||
for _, eb := range ebs { |
||||
ebss = append(ebss, fmt.Sprintf("%d", eb)) |
||||
} |
||||
return strings.Join(ebss, ", ") |
||||
} |
@ -1,173 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package kademlia |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math/rand" |
||||
"strings" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
type Address common.Hash |
||||
|
||||
func (a Address) String() string { |
||||
return fmt.Sprintf("%x", a[:]) |
||||
} |
||||
|
||||
func (a *Address) MarshalJSON() (out []byte, err error) { |
||||
return []byte(`"` + a.String() + `"`), nil |
||||
} |
||||
|
||||
func (a *Address) UnmarshalJSON(value []byte) error { |
||||
*a = Address(common.HexToHash(string(value[1 : len(value)-1]))) |
||||
return nil |
||||
} |
||||
|
||||
// Bin returns the string form of the binary representation of an address
|
||||
func (a Address) Bin() string { |
||||
var bs []string |
||||
for _, b := range a[:] { |
||||
bs = append(bs, fmt.Sprintf("%08b", b)) |
||||
} |
||||
return strings.Join(bs, "") |
||||
} |
||||
|
||||
/*
Proximity(x, y) returns the proximity order of the MSB distance between x and y

The distance metric MSB(x, y) of two equal length byte sequences x and y is the
value of the binary integer cast of x^y, i.e., x and y bitwise xor-ed.
The binary cast is big endian: most significant bit first (=MSB).

Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
It is defined as the reverse rank of the integer part of the base 2
logarithm of the distance.
It is calculated by counting the number of common leading zeros in the (MSB)
binary representation of x^y.

(0 farthest, 255 closest, 256 self)
*/
||||
func proximity(one, other Address) (ret int) { |
||||
for i := 0; i < len(one); i++ { |
||||
oxo := one[i] ^ other[i] |
||||
for j := 0; j < 8; j++ { |
||||
if (oxo>>uint8(7-j))&0x01 != 0 { |
||||
return i*8 + j |
||||
} |
||||
} |
||||
} |
||||
return len(one) * 8 |
||||
} |
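
// proximityExample is an illustrative sketch (not part of the original file, the name is
// hypothetical) showing the proximity order for a few hand-picked addresses, using the
// proximity function above.
func proximityExample() {
	a := Address(common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"))
	b := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
	c := Address(common.HexToHash("0x8100000000000000000000000000000000000000000000000000000000000000"))
	fmt.Println(proximity(a, b)) // 0: a and b already differ in the most significant bit
	fmt.Println(proximity(a, c)) // 7: the first seven bits agree, the eighth differs
	fmt.Println(proximity(a, a)) // 256: an address is closest to itself
}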
||||
|
||||
// Address.ProxCmp compares the distances a->target and b->target.
|
||||
// Returns -1 if a is closer to target, 1 if b is closer to target
|
||||
// and 0 if they are equal.
|
||||
func (target Address) ProxCmp(a, b Address) int { |
||||
for i := range target { |
||||
da := a[i] ^ target[i] |
||||
db := b[i] ^ target[i] |
||||
if da > db { |
||||
return 1 |
||||
} else if da < db { |
||||
return -1 |
||||
} |
||||
} |
||||
return 0 |
||||
} |
||||
|
||||
// RandomAddressAt(address, prox) generates a random address
// at proximity order prox relative to address.
// If prox is negative a random address is generated.
|
||||
func RandomAddressAt(self Address, prox int) (addr Address) { |
||||
addr = self |
||||
var pos int |
||||
if prox >= 0 { |
||||
pos = prox / 8 |
||||
trans := prox % 8 |
||||
transbytea := byte(0) |
||||
for j := 0; j <= trans; j++ { |
||||
transbytea |= 1 << uint8(7-j) |
||||
} |
||||
flipbyte := byte(1 << uint8(7-trans)) |
||||
transbyteb := transbytea ^ byte(255) |
||||
randbyte := byte(rand.Intn(255)) |
||||
addr[pos] = ((addr[pos] & transbytea) ^ flipbyte) | randbyte&transbyteb |
||||
} |
||||
for i := pos + 1; i < len(addr); i++ { |
||||
addr[i] = byte(rand.Intn(255)) |
||||
} |
||||
|
||||
return |
||||
} |
||||
|
||||
// KeyRange(a0, a1, proxLimit) returns the inclusive address
// range that contains addresses closer to one than to other
|
||||
func KeyRange(one, other Address, proxLimit int) (start, stop Address) { |
||||
prox := proximity(one, other) |
||||
if prox >= proxLimit { |
||||
prox = proxLimit |
||||
} |
||||
start = CommonBitsAddrByte(one, other, byte(0x00), prox) |
||||
stop = CommonBitsAddrByte(one, other, byte(0xff), prox) |
||||
return |
||||
} |
||||
|
||||
func CommonBitsAddrF(self, other Address, f func() byte, p int) (addr Address) { |
||||
prox := proximity(self, other) |
||||
var pos int |
||||
if p <= prox { |
||||
prox = p |
||||
} |
||||
pos = prox / 8 |
||||
addr = self |
||||
trans := byte(prox % 8) |
||||
var transbytea byte |
||||
if p > prox { |
||||
transbytea = byte(0x7f) |
||||
} else { |
||||
transbytea = byte(0xff) |
||||
} |
||||
transbytea >>= trans |
||||
transbyteb := transbytea ^ byte(0xff) |
||||
addrpos := addr[pos] |
||||
addrpos &= transbyteb |
||||
if p > prox { |
||||
addrpos ^= byte(0x80 >> trans) |
||||
} |
||||
addrpos |= transbytea & f() |
||||
addr[pos] = addrpos |
||||
for i := pos + 1; i < len(addr); i++ { |
||||
addr[i] = f() |
||||
} |
||||
|
||||
return |
||||
} |
||||
|
||||
func CommonBitsAddr(self, other Address, prox int) (addr Address) { |
||||
return CommonBitsAddrF(self, other, func() byte { return byte(rand.Intn(255)) }, prox) |
||||
} |
||||
|
||||
func CommonBitsAddrByte(self, other Address, b byte, prox int) (addr Address) { |
||||
return CommonBitsAddrF(self, other, func() byte { return b }, prox) |
||||
} |
||||
|
||||
// RandomAddress generates a random address
|
||||
func RandomAddress() Address { |
||||
return RandomAddressAt(Address{}, -1) |
||||
} |
@ -1,96 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package kademlia |
||||
|
||||
import ( |
||||
"math/rand" |
||||
"reflect" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
) |
||||
|
||||
func (Address) Generate(rand *rand.Rand, size int) reflect.Value { |
||||
var id Address |
||||
for i := 0; i < len(id); i++ { |
||||
id[i] = byte(uint8(rand.Intn(255))) |
||||
} |
||||
return reflect.ValueOf(id) |
||||
} |
||||
|
||||
func TestCommonBitsAddrF(t *testing.T) { |
||||
a := Address(common.HexToHash("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) |
||||
b := Address(common.HexToHash("0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) |
||||
c := Address(common.HexToHash("0x4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) |
||||
d := Address(common.HexToHash("0x0023456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) |
||||
e := Address(common.HexToHash("0x01A3456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) |
||||
ab := CommonBitsAddrF(a, b, func() byte { return byte(0x00) }, 10) |
||||
expab := Address(common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000")) |
||||
|
||||
if ab != expab { |
||||
t.Fatalf("%v != %v", ab, expab) |
||||
} |
||||
ac := CommonBitsAddrF(a, c, func() byte { return byte(0x00) }, 10) |
||||
expac := Address(common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000")) |
||||
|
||||
if ac != expac { |
||||
t.Fatalf("%v != %v", ac, expac) |
||||
} |
||||
ad := CommonBitsAddrF(a, d, func() byte { return byte(0x00) }, 10) |
||||
expad := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")) |
||||
|
||||
if ad != expad { |
||||
t.Fatalf("%v != %v", ad, expad) |
||||
} |
||||
ae := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 10) |
||||
expae := Address(common.HexToHash("0x0180000000000000000000000000000000000000000000000000000000000000")) |
||||
|
||||
if ae != expae { |
||||
t.Fatalf("%v != %v", ae, expae) |
||||
} |
||||
acf := CommonBitsAddrF(a, c, func() byte { return byte(0xff) }, 10) |
||||
expacf := Address(common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) |
||||
|
||||
if acf != expacf { |
||||
t.Fatalf("%v != %v", acf, expacf) |
||||
} |
||||
aeo := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 2) |
||||
expaeo := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")) |
||||
|
||||
if aeo != expaeo { |
||||
t.Fatalf("%v != %v", aeo, expaeo) |
||||
} |
||||
aep := CommonBitsAddrF(a, e, func() byte { return byte(0xff) }, 2) |
||||
expaep := Address(common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) |
||||
|
||||
if aep != expaep { |
||||
t.Fatalf("%v != %v", aep, expaep) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestRandomAddressAt(t *testing.T) { |
||||
var a Address |
||||
for i := 0; i < 100; i++ { |
||||
a = RandomAddress() |
||||
prox := rand.Intn(255) |
||||
b := RandomAddressAt(a, prox) |
||||
if proximity(a, b) != prox { |
||||
t.Fatalf("incorrect address prox(%v, %v) == %v (expected %v)", a, b, proximity(a, b), prox) |
||||
} |
||||
} |
||||
} |
@ -1,350 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package kademlia |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"os" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
) |
||||
|
||||
type NodeData interface { |
||||
json.Marshaler |
||||
json.Unmarshaler |
||||
} |
||||
|
||||
// NodeRecord keeps the persisted record of a known peer; inactive peers are allowed to be kept
|
||||
type NodeRecord struct { |
||||
Addr Address // address of node
|
||||
Url string // Url, used to connect to node
|
||||
After time.Time // next call after time
|
||||
Seen time.Time // last connected at time
|
||||
Meta *json.RawMessage // arbitrary metadata saved for a peer
|
||||
|
||||
node Node |
||||
} |
||||
|
||||
func (self *NodeRecord) setSeen() { |
||||
t := time.Now() |
||||
self.Seen = t |
||||
self.After = t |
||||
} |
||||
|
||||
func (self *NodeRecord) String() string { |
||||
return fmt.Sprintf("<%v>", self.Addr) |
||||
} |
||||
|
||||
// KadDb is the persisted node record database
|
||||
type KadDb struct { |
||||
Address Address |
||||
Nodes [][]*NodeRecord |
||||
index map[Address]*NodeRecord |
||||
cursors []int |
||||
lock sync.RWMutex |
||||
purgeInterval time.Duration |
||||
initialRetryInterval time.Duration |
||||
connRetryExp int |
||||
} |
||||
|
||||
func newKadDb(addr Address, params *KadParams) *KadDb { |
||||
return &KadDb{ |
||||
Address: addr, |
||||
Nodes: make([][]*NodeRecord, params.MaxProx+1), // overwritten by load
|
||||
cursors: make([]int, params.MaxProx+1), |
||||
index: make(map[Address]*NodeRecord), |
||||
purgeInterval: params.PurgeInterval, |
||||
initialRetryInterval: params.InitialRetryInterval, |
||||
connRetryExp: params.ConnRetryExp, |
||||
} |
||||
} |
||||
|
||||
func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
|
||||
record, found := self.index[a] |
||||
if !found { |
||||
record = &NodeRecord{ |
||||
Addr: a, |
||||
Url: url, |
||||
} |
||||
log.Info(fmt.Sprintf("add new record %v to kaddb", record)) |
||||
// insert in kaddb
|
||||
self.index[a] = record |
||||
self.Nodes[index] = append(self.Nodes[index], record) |
||||
} else { |
||||
log.Info(fmt.Sprintf("found record %v in kaddb", record)) |
||||
} |
||||
// update last seen time
|
||||
record.setSeen() |
||||
// update with url in case IP/port changes
|
||||
record.Url = url |
||||
return record |
||||
} |
||||
|
||||
// add adds node records to kaddb (persisted node record db)
|
||||
func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
var n int |
||||
var nodes []*NodeRecord |
||||
for _, node := range nrs { |
||||
_, found := self.index[node.Addr] |
||||
if !found && node.Addr != self.Address { |
||||
node.setSeen() |
||||
self.index[node.Addr] = node |
||||
index := proximityBin(node.Addr) |
||||
dbcursor := self.cursors[index] |
||||
nodes = self.Nodes[index] |
||||
// this is inefficient for allocation, need to just append then shift
|
||||
newnodes := make([]*NodeRecord, len(nodes)+1) |
||||
copy(newnodes[:], nodes[:dbcursor]) |
||||
newnodes[dbcursor] = node |
||||
copy(newnodes[dbcursor+1:], nodes[dbcursor:]) |
||||
log.Trace(fmt.Sprintf("new nodes: %v, nodes: %v", newnodes, nodes)) |
||||
self.Nodes[index] = newnodes |
||||
n++ |
||||
} |
||||
} |
||||
if n > 0 { |
||||
log.Debug(fmt.Sprintf("%d/%d node records (new/known)", n, len(nrs))) |
||||
} |
||||
} |
||||
|
||||
/*
next returns one node record with the highest priority for desired
connection.
This is used to pick candidates for live nodes that are most wanted for
a highly connected, low centrality network structure for Swarm which best suits
a Kademlia-style routing.

* Starting as a naive node with an empty db, this implements Kademlia bootstrapping
* As a mature node, it fills short lines. All on demand.

The candidate is chosen using the following strategy:
We check for missing online nodes in the buckets for 1 up to Max BucketSize rounds.
On each round we proceed from the low to high proximity order buckets.
If the number of active nodes (=connected peers) is < rounds, then start looking
for a known candidate. To determine if there is a candidate to recommend, the
kaddb node record database row corresponding to the bucket is checked.

If the row cursor is on position i, the ith element in the row is chosen.
If the record is scheduled not to be retried before NOW, the next element is taken.
If the record is scheduled to be retried, it is set as checked, scheduled for
checking and is returned. The time of the next check is in X (duration) such that
X = ConnRetryExp * delta where delta is the time passed since the last check and
ConnRetryExp is a constant obsoletion factor. (Note that when node records are added
from peer messages, they are marked as checked and placed at the cursor, i.e.
given priority over older entries). Entries which were checked more than
purgeInterval ago are deleted from the kaddb row. If no candidate is found after
a full round of checking, the next bucket up is considered. If no candidate is
found when we reach the maximum-proximity bucket, the next round starts.

node record a is more favoured than b (a > b) iff a is a passive node (record of
offline past peer)
|proxBin(a)| < |proxBin(b)|
|| (proxBin(a) < proxBin(b) && |proxBin(a)| == |proxBin(b)|)
|| (proxBin(a) == proxBin(b) && lastChecked(a) < lastChecked(b))


The second argument returned names the first missing slot found
*/
||||
func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRecord, need bool, proxLimit int) { |
||||
// return nil, proxLimit indicates that all buckets are filled
|
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
|
||||
var interval time.Duration |
||||
var found bool |
||||
var purge []bool |
||||
var delta time.Duration |
||||
var cursor int |
||||
var count int |
||||
var after time.Time |
||||
|
||||
// iterate over columns maximum bucketsize times
|
||||
for rounds := 1; rounds <= maxBinSize; rounds++ { |
||||
ROUND: |
||||
// iterate over rows from PO 0 up to MaxProx
|
||||
for po, dbrow := range self.Nodes { |
||||
// if the row already has rounds connected peers, then take the next row
|
||||
if binSize(po) >= rounds { |
||||
continue ROUND |
||||
} |
||||
if !need { |
||||
// set proxlimit to the PO where the first missing slot is found
|
||||
proxLimit = po |
||||
need = true |
||||
} |
||||
purge = make([]bool, len(dbrow)) |
||||
|
||||
// there is a missing slot - find a node to connect to
// select a node record from the relevant kaddb row (of identical prox order)
|
||||
ROW: |
||||
for cursor = self.cursors[po]; !found && count < len(dbrow); cursor = (cursor + 1) % len(dbrow) { |
||||
count++ |
||||
node = dbrow[cursor] |
||||
|
||||
// skip already connected nodes
|
||||
if node.node != nil { |
||||
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow))) |
||||
continue ROW |
||||
} |
||||
|
||||
// if the node is not yet due to be retried, skip it
|
||||
if node.After.After(time.Now()) { |
||||
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)) |
||||
continue ROW |
||||
} |
||||
|
||||
delta = time.Since(node.Seen) |
||||
if delta < self.initialRetryInterval { |
||||
delta = self.initialRetryInterval |
||||
} |
||||
if delta > self.purgeInterval { |
||||
// remove node
|
||||
purge[cursor] = true |
||||
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen)) |
||||
continue ROW |
||||
} |
||||
|
||||
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After)) |
||||
|
||||
// scheduling next check
|
||||
interval = delta * time.Duration(self.connRetryExp) |
||||
after = time.Now().Add(interval) |
||||
|
||||
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval)) |
||||
node.After = after |
||||
found = true |
||||
} // ROW
|
||||
self.cursors[po] = cursor |
||||
self.delete(po, purge) |
||||
if found { |
||||
return node, need, proxLimit |
||||
} |
||||
} // ROUND
|
||||
} // ROUNDS
|
||||
|
||||
return nil, need, proxLimit |
||||
} |
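
// nextCheckExample is an illustrative sketch (not part of the original file, the name is
// hypothetical) of the retry scheduling rule described in the comment above findBest:
// the next check of a record happens after X = ConnRetryExp * delta, where delta is the
// time since the record was last seen (floored at the initial retry interval).
func nextCheckExample(record *NodeRecord, connRetryExp int, initialRetryInterval time.Duration) time.Time {
	delta := time.Since(record.Seen)
	if delta < initialRetryInterval {
		delta = initialRetryInterval
	}
	return time.Now().Add(delta * time.Duration(connRetryExp))
}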
||||
|
||||
// deletes the node records of a kaddb row corresponding to the indexes
|
||||
// caller must hold the dblock
|
||||
// the call is unsafe, no index checks
|
||||
func (self *KadDb) delete(row int, purge []bool) { |
||||
var nodes []*NodeRecord |
||||
dbrow := self.Nodes[row] |
||||
for i, del := range purge { |
||||
if i == self.cursors[row] { |
||||
//reset cursor
|
||||
self.cursors[row] = len(nodes) |
||||
} |
||||
// delete the entry to be purged
|
||||
if del { |
||||
delete(self.index, dbrow[i].Addr) |
||||
continue |
||||
} |
||||
// otherwise append to new list
|
||||
nodes = append(nodes, dbrow[i]) |
||||
} |
||||
self.Nodes[row] = nodes |
||||
} |
||||
|
||||
// save persists kaddb on disk (written to file on path in json format).
|
||||
func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
|
||||
var n int |
||||
|
||||
for _, b := range self.Nodes { |
||||
for _, node := range b { |
||||
n++ |
||||
node.After = time.Now() |
||||
node.Seen = time.Now() |
||||
if cb != nil { |
||||
cb(node, node.node) |
||||
} |
||||
} |
||||
} |
||||
|
||||
data, err := json.MarshalIndent(self, "", " ") |
||||
if err != nil { |
||||
return err |
||||
} |
||||
err = ioutil.WriteFile(path, data, os.ModePerm) |
||||
if err != nil { |
||||
log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err)) |
||||
} else { |
||||
log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path)) |
||||
} |
||||
return err |
||||
} |
||||
|
||||
// Load(path) loads the node record database (kaddb) from file on path.
|
||||
func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err error) { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
|
||||
var data []byte |
||||
data, err = ioutil.ReadFile(path) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
err = json.Unmarshal(data, self) |
||||
if err != nil { |
||||
return |
||||
} |
||||
var n int |
||||
var purge []bool |
||||
for po, b := range self.Nodes { |
||||
purge = make([]bool, len(b)) |
||||
ROW: |
||||
for i, node := range b { |
||||
if cb != nil { |
||||
err = cb(node, node.node) |
||||
if err != nil { |
||||
purge[i] = true |
||||
continue ROW |
||||
} |
||||
} |
||||
n++ |
||||
if node.After.IsZero() { |
||||
node.After = time.Now() |
||||
} |
||||
self.index[node.Addr] = node |
||||
} |
||||
self.delete(po, purge) |
||||
} |
||||
log.Info(fmt.Sprintf("loaded kaddb with %v nodes from %v", n, path)) |
||||
|
||||
return |
||||
} |
||||
|
||||
// accessor for KAD offline db count
|
||||
func (self *KadDb) count() int { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
return len(self.index) |
||||
} |
@ -1,454 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package kademlia |
||||
|
||||
import ( |
||||
"fmt" |
||||
"sort" |
||||
"strings" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
) |
||||
|
||||
//metrics variables
|
||||
//For metrics, we want to count how many times peers are added/removed
|
||||
//at a certain index. Thus we do that with an array of counters with
|
||||
//entry for each index
|
||||
var ( |
||||
bucketAddIndexCount []metrics.Counter |
||||
bucketRmIndexCount []metrics.Counter |
||||
) |
||||
|
||||
const ( |
||||
bucketSize = 4 |
||||
proxBinSize = 2 |
||||
maxProx = 8 |
||||
connRetryExp = 2 |
||||
maxPeers = 100 |
||||
) |
||||
|
||||
var ( |
||||
purgeInterval = 42 * time.Hour |
||||
initialRetryInterval = 42 * time.Millisecond |
||||
maxIdleInterval = 42 * 1000 * time.Millisecond |
||||
// maxIdleInterval = 42 * 10 0 * time.Millisecond
|
||||
) |
||||
|
||||
type KadParams struct { |
||||
// adjustable parameters
|
||||
MaxProx int |
||||
ProxBinSize int |
||||
BucketSize int |
||||
PurgeInterval time.Duration |
||||
InitialRetryInterval time.Duration |
||||
MaxIdleInterval time.Duration |
||||
ConnRetryExp int |
||||
} |
||||
|
||||
func NewDefaultKadParams() *KadParams { |
||||
return &KadParams{ |
||||
MaxProx: maxProx, |
||||
ProxBinSize: proxBinSize, |
||||
BucketSize: bucketSize, |
||||
PurgeInterval: purgeInterval, |
||||
InitialRetryInterval: initialRetryInterval, |
||||
MaxIdleInterval: maxIdleInterval, |
||||
ConnRetryExp: connRetryExp, |
||||
} |
||||
} |
||||
|
||||
// Kademlia is a table of active nodes
|
||||
type Kademlia struct { |
||||
addr Address // immutable base address of the table
|
||||
*KadParams // Kademlia configuration parameters
|
||||
proxLimit int // state, the PO of the first row of the most proximate bin
|
||||
proxSize int // state, the number of peers in the most proximate bin
|
||||
count int // number of active peers (with live connection)
|
||||
buckets [][]Node // the actual bins
|
||||
db *KadDb // kaddb, node record database
|
||||
lock sync.RWMutex // mutex to access buckets
|
||||
} |
||||
|
||||
type Node interface { |
||||
Addr() Address |
||||
Url() string |
||||
LastActive() time.Time |
||||
Drop() |
||||
} |
||||
|
||||
// public constructor
|
||||
// addr is the base address of the table
|
||||
// params is KadParams configuration
|
||||
func New(addr Address, params *KadParams) *Kademlia { |
||||
buckets := make([][]Node, params.MaxProx+1) |
||||
kad := &Kademlia{ |
||||
addr: addr, |
||||
KadParams: params, |
||||
buckets: buckets, |
||||
db: newKadDb(addr, params), |
||||
} |
||||
kad.initMetricsVariables() |
||||
return kad |
||||
} |
||||
|
||||
// accessor for KAD base address
|
||||
func (self *Kademlia) Addr() Address { |
||||
return self.addr |
||||
} |
||||
|
||||
// accessor for KAD active node count
|
||||
func (self *Kademlia) Count() int { |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
return self.count |
||||
} |
||||
|
||||
// accessor for KAD node record db count
|
||||
func (self *Kademlia) DBCount() int { |
||||
return self.db.count() |
||||
} |
||||
|
||||
// On is the entry point called when a new node is added;
// unsafe in that the node is not checked to already be an active node (to be called once)
|
||||
func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error) { |
||||
log.Debug(fmt.Sprintf("%v", self)) |
||||
defer self.lock.Unlock() |
||||
self.lock.Lock() |
||||
|
||||
index := self.proximityBin(node.Addr()) |
||||
record := self.db.findOrCreate(index, node.Addr(), node.Url()) |
||||
|
||||
if cb != nil { |
||||
err = cb(record, node) |
||||
log.Trace(fmt.Sprintf("cb(%v, %v) ->%v", record, node, err)) |
||||
if err != nil { |
||||
return fmt.Errorf("unable to add node %v, callback error: %v", node.Addr(), err) |
||||
} |
||||
log.Debug(fmt.Sprintf("add node record %v with node %v", record, node)) |
||||
} |
||||
|
||||
// insert in kademlia table of active nodes
|
||||
bucket := self.buckets[index] |
||||
// if bucket is full insertion replaces the worst node
|
||||
// TODO: give priority to peers with active traffic
|
||||
if len(bucket) < self.BucketSize { // >= allows us to add peers beyond the bucketsize limitation
|
||||
self.buckets[index] = append(bucket, node) |
||||
bucketAddIndexCount[index].Inc(1) |
||||
log.Debug(fmt.Sprintf("add node %v to table", node)) |
||||
self.setProxLimit(index, true) |
||||
record.node = node |
||||
self.count++ |
||||
return nil |
||||
} |
||||
|
||||
// always rotate peers
|
||||
idle := self.MaxIdleInterval |
||||
var pos int |
||||
var replaced Node |
||||
for i, p := range bucket { |
||||
idleInt := time.Since(p.LastActive()) |
||||
if idleInt > idle { |
||||
idle = idleInt |
||||
pos = i |
||||
replaced = p |
||||
} |
||||
} |
||||
if replaced == nil { |
||||
log.Debug(fmt.Sprintf("all peers wanted, PO%03d bucket full", index)) |
||||
return fmt.Errorf("bucket full") |
||||
} |
||||
log.Debug(fmt.Sprintf("node %v replaced by %v (idle for %v > %v)", replaced, node, idle, self.MaxIdleInterval)) |
||||
replaced.Drop() |
||||
// actually replace in the row. When off(node) is called, the peer is no longer in the row
|
||||
bucket[pos] = node |
||||
// there is no change in bucket cardinalities so no prox limit adjustment is needed
|
||||
record.node = node |
||||
self.count++ |
||||
return nil |
||||
|
||||
} |
||||
|
||||
// Off is called when a node is taken offline (from the protocol main loop exit)
|
||||
func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) { |
||||
self.lock.Lock() |
||||
defer self.lock.Unlock() |
||||
|
||||
index := self.proximityBin(node.Addr()) |
||||
bucketRmIndexCount[index].Inc(1) |
||||
bucket := self.buckets[index] |
||||
for i := 0; i < len(bucket); i++ { |
||||
if node.Addr() == bucket[i].Addr() { |
||||
self.buckets[index] = append(bucket[:i], bucket[(i+1):]...) |
||||
self.setProxLimit(index, false) |
||||
break |
||||
} |
||||
} |
||||
|
||||
record := self.db.index[node.Addr()] |
||||
// callback on remove
|
||||
if cb != nil { |
||||
cb(record, record.node) |
||||
} |
||||
record.node = nil |
||||
self.count-- |
||||
log.Debug(fmt.Sprintf("remove node %v from table, population now is %v", node, self.count)) |
||||
|
||||
return |
||||
} |
||||
|
||||
// proxLimit is dynamically adjusted so that
// 1) there are no empty buckets in bins < proxLimit and
// 2) the sum of all items is the minimum possible but higher than ProxBinSize
// setProxLimit adjusts proxLimit and proxSize after an insertion/removal of nodes
// caller holds the lock
|
||||
func (self *Kademlia) setProxLimit(r int, on bool) { |
||||
// if the change is outside the core (PO lower)
|
||||
// and the change does not leave a bucket empty then
|
||||
// no adjustment needed
|
||||
if r < self.proxLimit && len(self.buckets[r]) > 0 { |
||||
return |
||||
} |
||||
// if on (a node was added), then r must be within the prox limit, so increment cardinality
|
||||
if on { |
||||
self.proxSize++ |
||||
curr := len(self.buckets[self.proxLimit]) |
||||
// if the core is now big enough without the furthest bucket, then contract
|
||||
// this can result in more than one bucket change
|
||||
for self.proxSize >= self.ProxBinSize+curr && curr > 0 { |
||||
self.proxSize -= curr |
||||
self.proxLimit++ |
||||
curr = len(self.buckets[self.proxLimit]) |
||||
|
||||
log.Trace(fmt.Sprintf("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)) |
||||
} |
||||
return |
||||
} |
||||
// otherwise
|
||||
if r >= self.proxLimit { |
||||
self.proxSize-- |
||||
} |
||||
// expand the core by lowering the prox limit until it hits zero, covers the empty bucket or reaches the target cardinality
|
||||
for (self.proxSize < self.ProxBinSize || r < self.proxLimit) && |
||||
self.proxLimit > 0 { |
||||
//
|
||||
self.proxLimit-- |
||||
self.proxSize += len(self.buckets[self.proxLimit]) |
||||
log.Trace(fmt.Sprintf("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r)) |
||||
} |
||||
} |
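
// proxInvariantsExample is an illustrative sketch (not part of the original file, the
// name is hypothetical) of part of the invariants maintained above: proxSize equals the
// number of peers in the buckets at or above proxLimit, and no bucket below proxLimit
// is empty. The caller holds the lock.
func (self *Kademlia) proxInvariantsExample() bool {
	size := 0
	for po := self.proxLimit; po <= self.MaxProx; po++ {
		size += len(self.buckets[po])
	}
	if size != self.proxSize {
		return false
	}
	for po := 0; po < self.proxLimit; po++ {
		if len(self.buckets[po]) == 0 {
			return false
		}
	}
	return true
}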
||||
|
||||
/* |
||||
returns the list of nodes belonging to the same proximity bin |
||||
as the target. The most proximate bin will be the union of the bins between |
||||
proxLimit and MaxProx. |
||||
*/ |
||||
func (self *Kademlia) FindClosest(target Address, max int) []Node { |
||||
self.lock.Lock() |
||||
defer self.lock.Unlock() |
||||
|
||||
r := nodesByDistance{ |
||||
target: target, |
||||
} |
||||
|
||||
po := self.proximityBin(target) |
||||
index := po |
||||
step := 1 |
||||
log.Trace(fmt.Sprintf("serving %v nodes at %v (PO%02d)", max, index, po)) |
||||
|
||||
// if max is set to 0, just want a full bucket, dynamic number
|
||||
min := max |
||||
// set limit to max
|
||||
limit := max |
||||
if max == 0 { |
||||
min = 1 |
||||
limit = maxPeers |
||||
} |
||||
|
||||
var n int |
||||
for index >= 0 { |
||||
// add entire bucket
|
||||
for _, p := range self.buckets[index] { |
||||
r.push(p, limit) |
||||
n++ |
||||
} |
||||
// terminate if index reached the bottom or enough peers > min
|
||||
log.Trace(fmt.Sprintf("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po)) |
||||
if n >= min && (step < 0 || max == 0) { |
||||
break |
||||
} |
||||
// reach top most non-empty PO bucket, turn around
|
||||
if index == self.MaxProx { |
||||
index = po |
||||
step = -1 |
||||
} |
||||
index += step |
||||
} |
||||
log.Trace(fmt.Sprintf("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po)) |
||||
return r.nodes |
||||
} |
||||
|
||||
func (self *Kademlia) Suggest() (*NodeRecord, bool, int) { |
||||
defer self.lock.RUnlock() |
||||
self.lock.RLock() |
||||
return self.db.findBest(self.BucketSize, func(i int) int { return len(self.buckets[i]) }) |
||||
} |
||||
|
||||
// adds node records to kaddb (persisted node record db)
|
||||
func (self *Kademlia) Add(nrs []*NodeRecord) { |
||||
self.db.add(nrs, self.proximityBin) |
||||
} |
||||
|
||||
// nodesByDistance is a list of nodes, ordered by distance to target.
|
||||
type nodesByDistance struct { |
||||
nodes []Node |
||||
target Address |
||||
} |
||||
|
||||
func sortedByDistanceTo(target Address, slice []Node) bool { |
||||
var last Address |
||||
for i, node := range slice { |
||||
if i > 0 { |
||||
if target.ProxCmp(node.Addr(), last) < 0 { |
||||
return false |
||||
} |
||||
} |
||||
last = node.Addr() |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// push(node, max) adds the given node to the list, keeping the total size
|
||||
// below max elements.
|
||||
func (h *nodesByDistance) push(node Node, max int) { |
||||
// returns the first index ix such that func(i) returns true
|
||||
ix := sort.Search(len(h.nodes), func(i int) bool { |
||||
return h.target.ProxCmp(h.nodes[i].Addr(), node.Addr()) >= 0 |
||||
}) |
||||
|
||||
if len(h.nodes) < max { |
||||
h.nodes = append(h.nodes, node) |
||||
} |
||||
if ix < len(h.nodes) { |
||||
copy(h.nodes[ix+1:], h.nodes[ix:]) |
||||
h.nodes[ix] = node |
||||
} |
||||
} |
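
// pushExample is an illustrative sketch (not part of the original file, the name is
// hypothetical): repeatedly pushing candidate nodes keeps at most max entries in the
// list, sorted by proximity to the target.
func pushExample(target Address, candidates []Node, max int) []Node {
	r := nodesByDistance{target: target}
	for _, n := range candidates {
		r.push(n, max)
	}
	return r.nodes
}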
||||
|
||||
/*
Taking the proximity order relative to a fixed point x classifies the points in
the space (n byte long byte sequences) into bins. Items in each are at
most half as distant from x as items in the previous bin. Given a sample of
uniformly distributed items (a hash function over an arbitrary sequence) the
proximity scale maps onto a series of subsets with cardinalities on a negative
exponential scale.

It also has the property that any two items belonging to the same bin are at
most half as distant from each other as they are from x.

If we think of a random sample of items in the bins as connections in a network
of interconnected nodes, then relative proximity can serve as the basis for local
decisions for graph traversal where the task is to find a route between two
points. Since in every hop, the finite distance halves, there is
a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/
||||
|
||||
func (self *Kademlia) proximityBin(other Address) (ret int) { |
||||
ret = proximity(self.addr, other) |
||||
if ret > self.MaxProx { |
||||
ret = self.MaxProx |
||||
} |
||||
return |
||||
} |
||||
|
||||
// provides keyrange for chunk db iteration
|
||||
func (self *Kademlia) KeyRange(other Address) (start, stop Address) { |
||||
defer self.lock.RUnlock() |
||||
self.lock.RLock() |
||||
return KeyRange(self.addr, other, self.proxLimit) |
||||
} |
||||
|
||||
// save persists kaddb on disk (written to file on path in json format).
|
||||
func (self *Kademlia) Save(path string, cb func(*NodeRecord, Node)) error { |
||||
return self.db.save(path, cb) |
||||
} |
||||
|
||||
// Load(path) loads the node record database (kaddb) from file on path.
|
||||
func (self *Kademlia) Load(path string, cb func(*NodeRecord, Node) error) (err error) { |
||||
return self.db.load(path, cb) |
||||
} |
||||
|
||||
// kademlia table + kaddb table displayed with ascii
|
||||
func (self *Kademlia) String() string { |
||||
defer self.lock.RUnlock() |
||||
self.lock.RLock() |
||||
defer self.db.lock.RUnlock() |
||||
self.db.lock.RLock() |
||||
|
||||
var rows []string |
||||
rows = append(rows, "=========================================================================") |
||||
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %v", time.Now().UTC().Format(time.UnixDate), self.addr.String()[:6])) |
||||
rows = append(rows, fmt.Sprintf("population: %d (%d), proxLimit: %d, proxSize: %d", self.count, len(self.db.index), self.proxLimit, self.proxSize)) |
||||
rows = append(rows, fmt.Sprintf("MaxProx: %d, ProxBinSize: %d, BucketSize: %d", self.MaxProx, self.ProxBinSize, self.BucketSize)) |
||||
|
||||
for i, bucket := range self.buckets { |
||||
|
||||
if i == self.proxLimit { |
||||
rows = append(rows, fmt.Sprintf("============ PROX LIMIT: %d ==========================================", i)) |
||||
} |
||||
row := []string{fmt.Sprintf("%03d", i), fmt.Sprintf("%2d", len(bucket))} |
||||
var k int |
||||
c := self.db.cursors[i] |
||||
for ; k < len(bucket); k++ { |
||||
p := bucket[(c+k)%len(bucket)] |
||||
row = append(row, p.Addr().String()[:6]) |
||||
if k == 4 { |
||||
break |
||||
} |
||||
} |
||||
for ; k < 4; k++ { |
||||
row = append(row, " ") |
||||
} |
||||
row = append(row, fmt.Sprintf("| %2d %2d", len(self.db.Nodes[i]), self.db.cursors[i])) |
||||
|
||||
for j, p := range self.db.Nodes[i] { |
||||
row = append(row, p.Addr.String()[:6]) |
||||
if j == 3 { |
||||
break |
||||
} |
||||
} |
||||
rows = append(rows, strings.Join(row, " ")) |
||||
if i == self.MaxProx { |
||||
} |
||||
} |
||||
rows = append(rows, "=========================================================================") |
||||
return strings.Join(rows, "\n") |
||||
} |
||||
|
||||
//We have to build up the array of counters for each index
|
||||
func (self *Kademlia) initMetricsVariables() { |
||||
//create the arrays
|
||||
bucketAddIndexCount = make([]metrics.Counter, self.MaxProx+1) |
||||
bucketRmIndexCount = make([]metrics.Counter, self.MaxProx+1) |
||||
//at each index create a metrics counter
|
||||
for i := 0; i < (self.KadParams.MaxProx + 1); i++ { |
||||
bucketAddIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.add.%d.index", i), nil) |
||||
bucketRmIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.rm.%d.index", i), nil) |
||||
} |
||||
} |
@ -1,392 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package kademlia |
||||
|
||||
import ( |
||||
"fmt" |
||||
"math" |
||||
"math/rand" |
||||
"os" |
||||
"path/filepath" |
||||
"reflect" |
||||
"testing" |
||||
"testing/quick" |
||||
"time" |
||||
) |
||||
|
||||
var ( |
||||
quickrand = rand.New(rand.NewSource(time.Now().Unix())) |
||||
quickcfgFindClosest = &quick.Config{MaxCount: 50, Rand: quickrand} |
||||
quickcfgBootStrap = &quick.Config{MaxCount: 100, Rand: quickrand} |
||||
) |
||||
|
||||
type testNode struct { |
||||
addr Address |
||||
} |
||||
|
||||
func (n *testNode) String() string { |
||||
return fmt.Sprintf("%x", n.addr[:]) |
||||
} |
||||
|
||||
func (n *testNode) Addr() Address { |
||||
return n.addr |
||||
} |
||||
|
||||
func (n *testNode) Drop() { |
||||
} |
||||
|
||||
func (n *testNode) Url() string { |
||||
return "" |
||||
} |
||||
|
||||
func (n *testNode) LastActive() time.Time { |
||||
return time.Now() |
||||
} |
||||
|
||||
func TestOn(t *testing.T) { |
||||
addr, ok1 := gen(Address{}, quickrand).(Address) |
||||
other, ok2 := gen(Address{}, quickrand).(Address) |
||||
if !ok1 || !ok2 { |
||||
t.Errorf("oops") |
||||
} |
||||
kad := New(addr, NewDefaultKadParams()) |
||||
err := kad.On(&testNode{addr: other}, nil) |
||||
_ = err |
||||
} |
||||
|
||||
func TestBootstrap(t *testing.T) { |
||||
|
||||
test := func(test *bootstrapTest) bool { |
||||
// for any kademlia table, Target and N
|
||||
params := NewDefaultKadParams() |
||||
params.MaxProx = test.MaxProx |
||||
params.BucketSize = test.BucketSize |
||||
params.ProxBinSize = test.BucketSize |
||||
kad := New(test.Self, params) |
||||
var err error |
||||
|
||||
for p := 0; p < 9; p++ { |
||||
var nrs []*NodeRecord |
||||
n := math.Pow(float64(2), float64(7-p)) |
||||
for i := 0; i < int(n); i++ { |
||||
addr := RandomAddressAt(test.Self, p) |
||||
nrs = append(nrs, &NodeRecord{ |
||||
Addr: addr, |
||||
}) |
||||
} |
||||
kad.Add(nrs) |
||||
} |
||||
|
||||
node := &testNode{test.Self} |
||||
|
||||
n := 0 |
||||
for n < 100 { |
||||
err = kad.On(node, nil) |
||||
if err != nil { |
||||
t.Fatalf("backend not accepting node: %v", err) |
||||
} |
||||
|
||||
record, need, _ := kad.Suggest() |
||||
if !need { |
||||
break |
||||
} |
||||
n++ |
||||
if record == nil { |
||||
continue |
||||
} |
||||
node = &testNode{record.Addr} |
||||
} |
||||
exp := test.BucketSize * (test.MaxProx + 1) |
||||
if kad.Count() != exp { |
||||
t.Errorf("incorrect number of peers, expected %d, got %d\n%v", exp, kad.Count(), kad) |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
if err := quick.Check(test, quickcfgBootStrap); err != nil { |
||||
t.Error(err) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestFindClosest(t *testing.T) { |
||||
|
||||
test := func(test *FindClosestTest) bool { |
||||
// for any kademlia table, Target and N
|
||||
params := NewDefaultKadParams() |
||||
params.MaxProx = 7 |
||||
kad := New(test.Self, params) |
||||
var err error |
||||
for _, node := range test.All { |
||||
err = kad.On(node, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("backend not accepting node: %v", err) |
||||
} |
||||
} |
||||
|
||||
if len(test.All) == 0 || test.N == 0 { |
||||
return true |
||||
} |
||||
nodes := kad.FindClosest(test.Target, test.N) |
||||
|
||||
// check that the number of results is min(N, number of nodes in the table)
|
||||
wantN := test.N |
||||
if tlen := kad.Count(); tlen < test.N { |
||||
wantN = tlen |
||||
} |
||||
|
||||
if len(nodes) != wantN { |
||||
t.Errorf("wrong number of nodes: got %d, want %d", len(nodes), wantN) |
||||
return false |
||||
} |
||||
|
||||
if hasDuplicates(nodes) { |
||||
t.Errorf("result contains duplicates") |
||||
return false |
||||
} |
||||
|
||||
if !sortedByDistanceTo(test.Target, nodes) { |
||||
t.Errorf("result is not sorted by distance to target") |
||||
return false |
||||
} |
||||
|
||||
// check that the result nodes have minimum distance to target.
|
||||
farthestResult := nodes[len(nodes)-1].Addr() |
||||
for i, b := range kad.buckets { |
||||
for j, n := range b { |
||||
if contains(nodes, n.Addr()) { |
||||
continue // don't run the check below for nodes in result
|
||||
} |
||||
if test.Target.ProxCmp(n.Addr(), farthestResult) < 0 { |
||||
_ = i * j |
||||
t.Errorf("kad.le contains node that is closer to target but it's not in result") |
||||
return false |
||||
} |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
if err := quick.Check(test, quickcfgFindClosest); err != nil { |
||||
t.Error(err) |
||||
} |
||||
} |
||||
|
||||
type proxTest struct { |
||||
add bool |
||||
index int |
||||
addr Address |
||||
} |
||||
|
||||
var ( |
||||
addresses []Address |
||||
) |
||||
|
||||
func TestProxAdjust(t *testing.T) { |
||||
r := rand.New(rand.NewSource(time.Now().UnixNano())) |
||||
self := gen(Address{}, r).(Address) |
||||
params := NewDefaultKadParams() |
||||
params.MaxProx = 7 |
||||
kad := New(self, params) |
||||
|
||||
var err error |
||||
for i := 0; i < 100; i++ { |
||||
a := gen(Address{}, r).(Address) |
||||
addresses = append(addresses, a) |
||||
err = kad.On(&testNode{addr: a}, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("backend not accepting node: %v", err) |
||||
} |
||||
if !kad.proxCheck(t) { |
||||
return |
||||
} |
||||
} |
||||
test := func(test *proxTest) bool { |
||||
node := &testNode{test.addr} |
||||
if test.add { |
||||
kad.On(node, nil) |
||||
} else { |
||||
kad.Off(node, nil) |
||||
} |
||||
return kad.proxCheck(t) |
||||
} |
||||
if err := quick.Check(test, quickcfgFindClosest); err != nil { |
||||
t.Error(err) |
||||
} |
||||
} |
||||
|
||||
func TestSaveLoad(t *testing.T) { |
||||
r := rand.New(rand.NewSource(time.Now().UnixNano())) |
||||
addresses := gen([]Address{}, r).([]Address) |
||||
self := RandomAddress() |
||||
params := NewDefaultKadParams() |
||||
params.MaxProx = 7 |
||||
kad := New(self, params) |
||||
|
||||
var err error |
||||
|
||||
for _, a := range addresses { |
||||
err = kad.On(&testNode{addr: a}, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("backend not accepting node: %v", err) |
||||
} |
||||
} |
||||
nodes := kad.FindClosest(self, 100) |
||||
|
||||
path := filepath.Join(os.TempDir(), "bzz-kad-test-save-load.peers") |
||||
err = kad.Save(path, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("unepected error saving kaddb: %v", err) |
||||
} |
||||
kad = New(self, params) |
||||
err = kad.Load(path, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("unepected error loading kaddb: %v", err) |
||||
} |
||||
for _, b := range kad.db.Nodes { |
||||
for _, node := range b { |
||||
err = kad.On(&testNode{node.Addr}, nil) |
||||
if err != nil && err.Error() != "bucket full" { |
||||
t.Fatalf("backend not accepting node: %v", err) |
||||
} |
||||
} |
||||
} |
||||
loadednodes := kad.FindClosest(self, 100) |
||||
for i, node := range loadednodes { |
||||
if nodes[i].Addr() != node.Addr() { |
||||
t.Errorf("node mismatch at %d/%d: %v != %v", i, len(nodes), nodes[i].Addr(), node.Addr()) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func (self *Kademlia) proxCheck(t *testing.T) bool { |
||||
var sum int |
||||
for i, b := range self.buckets { |
||||
l := len(b) |
||||
// if we are in the high prox multibucket
|
||||
if i >= self.proxLimit { |
||||
sum += l |
||||
} else if l == 0 { |
||||
t.Errorf("bucket %d empty, yet proxLimit is %d\n%v", len(b), self.proxLimit, self) |
||||
return false |
||||
} |
||||
} |
||||
// check if merged high prox bucket does not exceed size
|
||||
if sum > 0 { |
||||
if sum != self.proxSize { |
||||
t.Errorf("proxSize incorrect, expected %v, got %v", sum, self.proxSize) |
||||
return false |
||||
} |
||||
last := len(self.buckets[self.proxLimit]) |
||||
if last > 0 && sum >= self.ProxBinSize+last { |
||||
t.Errorf("proxLimit %v incorrect, redundant non-empty bucket %d added to proxBin with %v (target %v)\n%v", self.proxLimit, last, sum-last, self.ProxBinSize, self) |
||||
return false |
||||
} |
||||
if self.proxLimit > 0 && sum < self.ProxBinSize { |
||||
t.Errorf("proxLimit %v incorrect. proxSize %v is less than target %v, yet there is more peers", self.proxLimit, sum, self.ProxBinSize) |
||||
return false |
||||
} |
||||
} |
||||
return true |
||||
} |
||||
|
||||
type bootstrapTest struct { |
||||
MaxProx int |
||||
BucketSize int |
||||
Self Address |
||||
} |
||||
|
||||
func (*bootstrapTest) Generate(rand *rand.Rand, size int) reflect.Value { |
||||
t := &bootstrapTest{ |
||||
Self: gen(Address{}, rand).(Address), |
||||
MaxProx: 5 + rand.Intn(2), |
||||
BucketSize: rand.Intn(3) + 1, |
||||
} |
||||
return reflect.ValueOf(t) |
||||
} |
||||
|
||||
type FindClosestTest struct { |
||||
Self Address |
||||
Target Address |
||||
All []Node |
||||
N int |
||||
} |
||||
|
||||
func (c FindClosestTest) String() string { |
||||
return fmt.Sprintf("A: %064x\nT: %064x\n(%d)\n", c.Self[:], c.Target[:], c.N) |
||||
} |
||||
|
||||
func (*FindClosestTest) Generate(rand *rand.Rand, size int) reflect.Value { |
||||
t := &FindClosestTest{ |
||||
Self: gen(Address{}, rand).(Address), |
||||
Target: gen(Address{}, rand).(Address), |
||||
N: rand.Intn(bucketSize), |
||||
} |
||||
for _, a := range gen([]Address{}, rand).([]Address) { |
||||
t.All = append(t.All, &testNode{addr: a}) |
||||
} |
||||
return reflect.ValueOf(t) |
||||
} |
||||
|
||||
func (*proxTest) Generate(rand *rand.Rand, size int) reflect.Value { |
||||
var add bool |
||||
if rand.Intn(2) == 0 { |
||||
add = true |
||||
} |
||||
var t *proxTest |
||||
if add { |
||||
t = &proxTest{ |
||||
addr: gen(Address{}, rand).(Address), |
||||
add: add, |
||||
} |
||||
} else { |
||||
t = &proxTest{ |
||||
index: rand.Intn(len(addresses)), |
||||
add: add, |
||||
} |
||||
} |
||||
return reflect.ValueOf(t) |
||||
} |
||||
|
||||
func hasDuplicates(slice []Node) bool { |
||||
seen := make(map[Address]bool) |
||||
for _, node := range slice { |
||||
if seen[node.Addr()] { |
||||
return true |
||||
} |
||||
seen[node.Addr()] = true |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func contains(nodes []Node, addr Address) bool { |
||||
for _, n := range nodes { |
||||
if n.Addr() == addr { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// gen wraps quick.Value so it's easier to use.
|
||||
// it generates a random value of the given value's type.
|
||||
func gen(typ interface{}, rand *rand.Rand) interface{} { |
||||
v, ok := quick.Value(reflect.TypeOf(typ), rand) |
||||
if !ok { |
||||
panic(fmt.Sprintf("couldn't generate random value of type %T", typ)) |
||||
} |
||||
return v.Interface() |
||||
} |
@ -0,0 +1,623 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"os" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/swarm/pot" |
||||
) |
||||
|
||||
func init() { |
||||
h := log.LvlFilterHandler(log.LvlWarn, log.StreamHandler(os.Stderr, log.TerminalFormat(true))) |
||||
log.Root().SetHandler(h) |
||||
} |
||||
|
||||
func testKadPeerAddr(s string) *BzzAddr { |
||||
a := pot.NewAddressFromString(s) |
||||
return &BzzAddr{OAddr: a, UAddr: a} |
||||
} |
||||
|
||||
type testDropPeer struct { |
||||
Peer |
||||
dropc chan error |
||||
} |
||||
|
||||
type dropError struct { |
||||
error |
||||
addr string |
||||
} |
||||
|
||||
func (d *testDropPeer) Drop(err error) { |
||||
err2 := &dropError{err, binStr(d)} |
||||
d.dropc <- err2 |
||||
} |
||||
|
||||
type testKademlia struct { |
||||
*Kademlia |
||||
Discovery bool |
||||
dropc chan error |
||||
} |
||||
|
||||
func newTestKademlia(b string) *testKademlia { |
||||
params := NewKadParams() |
||||
params.MinBinSize = 1 |
||||
params.MinProxBinSize = 2 |
||||
base := pot.NewAddressFromString(b) |
||||
return &testKademlia{ |
||||
NewKademlia(base, params), |
||||
false, |
||||
make(chan error), |
||||
} |
||||
} |
||||
|
||||
func (k *testKademlia) newTestKadPeer(s string) Peer { |
||||
return &testDropPeer{&BzzPeer{BzzAddr: testKadPeerAddr(s)}, k.dropc} |
||||
} |
||||
|
||||
func (k *testKademlia) On(ons ...string) *testKademlia { |
||||
for _, s := range ons { |
||||
k.Kademlia.On(k.newTestKadPeer(s).(OverlayConn)) |
||||
} |
||||
return k |
||||
} |
||||
|
||||
func (k *testKademlia) Off(offs ...string) *testKademlia { |
||||
for _, s := range offs { |
||||
k.Kademlia.Off(k.newTestKadPeer(s).(OverlayConn)) |
||||
} |
||||
|
||||
return k |
||||
} |
||||
|
||||
func (k *testKademlia) Register(regs ...string) *testKademlia { |
||||
var as []OverlayAddr |
||||
for _, s := range regs { |
||||
as = append(as, testKadPeerAddr(s)) |
||||
} |
||||
err := k.Kademlia.Register(as) |
||||
if err != nil { |
||||
panic(err.Error()) |
||||
} |
||||
return k |
||||
} |
||||
|
||||
func testSuggestPeer(t *testing.T, k *testKademlia, expAddr string, expPo int, expWant bool) error { |
||||
addr, o, want := k.SuggestPeer() |
||||
if binStr(addr) != expAddr { |
||||
return fmt.Errorf("incorrect peer address suggested. expected %v, got %v", expAddr, binStr(addr)) |
||||
} |
||||
if o != expPo { |
||||
return fmt.Errorf("incorrect prox order suggested. expected %v, got %v", expPo, o) |
||||
} |
||||
if want != expWant { |
||||
return fmt.Errorf("expected SuggestPeer to want peers: %v", expWant) |
||||
} |
||||
return nil |
||||
} |
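For orientation, a compressed, illustrative-only sketch of how the chainable helpers above are combined; the bit-string addresses are arbitrary and no particular SuggestPeer outcome is asserted, since it depends on the topology being built:

func TestSuggestPeerSketch(t *testing.T) {
	// connect two peers to a kademlia based at 00000000, then drop one
	k := newTestKademlia("00000000").On("10000000", "01000000").Off("01000000")
	// inspect what the kademlia asks for next
	addr, po, want := k.SuggestPeer()
	t.Logf("suggested %v at po %d (want more peers: %v)", binStr(addr), po, want)
}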
||||
|
||||
func binStr(a OverlayPeer) string { |
||||
if a == nil { |
||||
return "<nil>" |
||||
} |
||||
return pot.ToBin(a.Address())[:8] |
||||
} |
||||
|
||||
func TestSuggestPeerBug(t *testing.T) { |
||||
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
|
||||
k := newTestKademlia("00000000").On( |
||||
"10000000", "11000000", |
||||
"01000000", |
||||
|
||||
"00010000", "00011000", |
||||
).Off( |
||||
"01000000", |
||||
) |
||||
err := testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
} |
||||
|
||||
func TestSuggestPeerFindPeers(t *testing.T) { |
||||
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
|
||||
k := newTestKademlia("00000000").On("00100000") |
||||
err := testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// 2 row gap, saturated proxbin, no callables -> want PO 0
|
||||
k.On("00010000") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// 1 row gap (1 less), saturated proxbin, no callables -> want PO 1
|
||||
k.On("10000000") |
||||
err = testSuggestPeer(t, k, "<nil>", 1, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// no gap (1 less), saturated proxbin, no callables -> do not want more
|
||||
k.On("01000000", "00100001") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// oversaturated proxbin -> do not want more
|
||||
k.On("00100001") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// reintroduce gap, disconnected peer callable
|
||||
// log.Info(k.String())
|
||||
k.Off("01000000") |
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// second time disconnected peer not callable
|
||||
// with reasonably set Interval
|
||||
err = testSuggestPeer(t, k, "<nil>", 1, true) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// on and off again, peer callable again
|
||||
k.On("01000000") |
||||
k.Off("01000000") |
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("01000000") |
||||
// new closer peer appears, it is immediately wanted
|
||||
k.Register("00010001") |
||||
err = testSuggestPeer(t, k, "00010001", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
// PO1 disconnects
|
||||
k.On("00010001") |
||||
log.Info(k.String()) |
||||
k.Off("01000000") |
||||
log.Info(k.String()) |
||||
// second time, gap filling
|
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("01000000") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.MinBinSize = 2 |
||||
err = testSuggestPeer(t, k, "<nil>", 0, true) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.Register("01000001") |
||||
err = testSuggestPeer(t, k, "01000001", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("10000001") |
||||
log.Trace(fmt.Sprintf("Kad:\n%v", k.String())) |
||||
err = testSuggestPeer(t, k, "<nil>", 1, true) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("01000001") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.MinBinSize = 3 |
||||
k.Register("10000010") |
||||
err = testSuggestPeer(t, k, "10000010", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("10000010") |
||||
err = testSuggestPeer(t, k, "<nil>", 1, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("01000010") |
||||
err = testSuggestPeer(t, k, "<nil>", 2, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("00100010") |
||||
err = testSuggestPeer(t, k, "<nil>", 3, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
k.On("00010010") |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestSuggestPeerRetries(t *testing.T) { |
||||
t.Skip("Test is disabled, because it is flaky. It fails with kademlia_test.go:346: incorrect peer address suggested. expected <nil>, got 01000000") |
||||
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
|
||||
k := newTestKademlia("00000000") |
||||
k.RetryInterval = int64(100 * time.Millisecond) // cycle
|
||||
k.MaxRetries = 50 |
||||
k.RetryExponent = 2 |
||||
sleep := func(n int) { |
||||
ts := k.RetryInterval |
||||
for i := 1; i < n; i++ { |
||||
ts *= int64(k.RetryExponent) |
||||
} |
||||
time.Sleep(time.Duration(ts)) |
||||
} |
||||
|
||||
k.Register("01000000") |
||||
k.On("00000001", "00000010") |
||||
err := testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
sleep(1) |
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
sleep(1) |
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
sleep(2) |
||||
err = testSuggestPeer(t, k, "01000000", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
sleep(2) |
||||
err = testSuggestPeer(t, k, "<nil>", 0, false) |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestKademliaHiveString(t *testing.T) { |
||||
k := newTestKademlia("00000000").On("01000000", "00100000").Register("10000000", "10000001") |
||||
k.MaxProxDisplay = 8 |
||||
h := k.String() |
||||
expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n000 0 | 2 8100 (0) 8000 (0)\n============ DEPTH: 1 ==========================================\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n=========================================================================" |
||||
if expH[104:] != h[104:] { |
||||
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h) |
||||
} |
||||
} |
||||
|
||||
// testKademliaCase constructs the kademlia and PeerPot map to validate
|
||||
// the SuggestPeer and Healthy methods for provided hex-encoded addresses.
|
||||
// Argument pivotAddr is the address of the kademlia.
|
||||
func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) { |
||||
addr := common.FromHex(pivotAddr) |
||||
addrs = append(addrs, pivotAddr) |
||||
|
||||
k := NewKademlia(addr, NewKadParams()) |
||||
|
||||
as := make([][]byte, len(addrs)) |
||||
for i, a := range addrs { |
||||
as[i] = common.FromHex(a) |
||||
} |
||||
|
||||
for _, a := range as { |
||||
if bytes.Equal(a, addr) { |
||||
continue |
||||
} |
||||
p := &BzzAddr{OAddr: a, UAddr: a} |
||||
if err := k.Register([]OverlayAddr{p}); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
ppmap := NewPeerPotMap(2, as) |
||||
|
||||
pp := ppmap[pivotAddr] |
||||
|
||||
for { |
||||
a, _, _ := k.SuggestPeer() |
||||
if a == nil { |
||||
break |
||||
} |
||||
k.On(&BzzPeer{BzzAddr: a.(*BzzAddr)}) |
||||
} |
||||
|
||||
h := k.Healthy(pp) |
||||
if !(h.GotNN && h.KnowNN && h.Full) { |
||||
t.Error("not healthy") |
||||
} |
||||
} |
||||
|
||||
/* |
||||
The regression test for the following invalid kademlia edge case. |
||||
|
||||
Addresses used in this test are discovered as part of the simulation network |
||||
in higher level tests for streaming. They were generated randomly. |
||||
|
||||
========================================================================= |
||||
Mon Apr 9 12:18:24 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1 |
||||
population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4 |
||||
000 2 d7e5 ec56 | 18 ec56 (0) d7e5 (0) d9e0 (0) c735 (0) |
||||
001 2 18f1 3176 | 14 18f1 (0) 10bb (0) 10d1 (0) 0421 (0) |
||||
002 2 52aa 47cd | 11 52aa (0) 51d9 (0) 5161 (0) 5130 (0) |
||||
003 1 646e | 1 646e (0) |
||||
004 0 | 3 769c (0) 76d1 (0) 7656 (0) |
||||
============ DEPTH: 5 ========================================== |
||||
005 1 7a48 | 1 7a48 (0) |
||||
006 1 7cbd | 1 7cbd (0) |
||||
007 0 | 0 |
||||
008 0 | 0 |
||||
009 0 | 0 |
||||
010 0 | 0 |
||||
011 0 | 0 |
||||
012 0 | 0 |
||||
013 0 | 0 |
||||
014 0 | 0 |
||||
015 0 | 0 |
||||
========================================================================= |
||||
*/ |
||||
func TestKademliaCase1(t *testing.T) { |
||||
testKademliaCase(t, |
||||
"7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e", |
||||
"ec560e6a4806aa37f147ee83687f3cf044d9953e61eedb8c34b6d50d9e2c5623", |
||||
"646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d", |
||||
"18f13c5fba653781019025ab10e8d2fdc916d6448729268afe9e928ffcdbb8e8", |
||||
"317617acf99b4ffddda8a736f8fc6c6ede0bf690bc23d834123823e6d03e2f69", |
||||
"d7e52d9647a5d1c27a68c3ee65d543be3947ae4b68537b236d71ef9cb15fb9ab", |
||||
"7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8", |
||||
"7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0", |
||||
"52aa3ddec61f4d48dd505a2385403c634f6ad06ee1d99c5c90a5ba6006f9af9c", |
||||
"47cdb6fa93eeb8bc91a417ff4e3b14a9c2ea85137462e2f575fae97f0c4be60d", |
||||
"5161943eb42e2a03e715fe8afa1009ff5200060c870ead6ab103f63f26cb107f", |
||||
"a38eaa1255f76bf883ca0830c86e8c4bb7eed259a8348aae9b03f21f90105bee", |
||||
"b2522bdf1ab26f324e75424fdf6e493b47e8a27687fe76347607b344fc010075", |
||||
"5bd7213964efb2580b91d02ac31ef126838abeba342f5dbdbe8d4d03562671a2", |
||||
"0b531adb82744768b694d7f94f73d4f0c9de591266108daeb8c74066bfc9c9ca", |
||||
"28501f59f70e888d399570145ed884353e017443c675aa12731ada7c87ea14f7", |
||||
"4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51", |
||||
"b193431ee35cd32de95805e7c1c749450c47486595aae7195ea6b6019a64fd61", |
||||
"baebf36a1e35a7ed834e1c72faf44ba16c159fa47d3289ceb3ca35fefa8739b5", |
||||
"a3659bd32e05fa36c8d20dbaaed8362bf1a8a7bd116aed62d8a43a2efbdf513f", |
||||
"10d1b50881a4770ebebdd0a75589dabb931e6716747b0f65fd6b080b88c4fdb6", |
||||
"3c76b8ca5c7ce6a03320646826213f59229626bf5b9d25da0c3ec0662dcb8ff3", |
||||
"4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112", |
||||
"c7353d320987956075b5bc1668571c7a36c800d5598fdc4832ec6569561e15d1", |
||||
"d9e0c7c90878c20ab7639d5954756f54775404b3483407fe1b483635182734f6", |
||||
"8fca67216b7939c0824fb06c5279901a94da41da9482b000f56df9906736ee75", |
||||
"460719d7f7aa7d7438f0eaf30333484fa3bd0f233632c10ba89e6e46dd3604be", |
||||
"0421d92c8a1c79ed5d01305a3d25aaf22a8f5f9e3d4bc80da47ee16ce20465fe", |
||||
"3441d9d9c0f05820a1bb6459fc7d8ef266a1bd929e7db939a10f544efe8261ea", |
||||
"ab198a66c293586746758468c610e5d3914d4ce629147eff6dd55a31f863ff8f", |
||||
"3a1c8c16b0763f3d2c35269f454ff779d1255e954d2deaf6c040fb3f0bcdc945", |
||||
"5561c0ea3b203e173b11e6aa9d0e621a4e10b1d8b178b8fe375220806557b823", |
||||
"7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3", |
||||
"5130594fd54c1652cf2debde2c4204573ed76555d1e26757fe345b409af1544a", |
||||
"76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0", |
||||
"89580231962624c53968c1b0095b4a2732b2a2640a19fdd7d21fd064fcc0a5ef", |
||||
"3d10d001fff44680c7417dd66ecf2e984f0baa20a9bbcea348583ba5ff210c4f", |
||||
"43754e323f0f3a1155b1852bd6edd55da86b8c4cfe3df8b33733fca50fc202b8", |
||||
"a9e7b1bb763ae6452ddcacd174993f82977d81a85206bb2ae3c842e2d8e19b4c", |
||||
"10bb07da7bc7c7757f74149eff167d528a94a253cdc694a863f4d50054c00b6d", |
||||
"28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19", |
||||
"835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc", |
||||
"8e236c56a77d7f46e41e80f7092b1a68cd8e92f6156365f41813ad1ca2c6b6f3", |
||||
"51d9c857e9238c49186e37b4eccf17a82de3d5739f026f6043798ab531456e73", |
||||
"bbddf7db6a682225301f36a9fd5b0d0121d2951753e1681295f3465352ad511f", |
||||
"2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e", |
||||
"769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a", |
||||
"ba3bebec689ce51d3e12776c45f80d25164fdfb694a8122d908081aaa2e7122c", |
||||
"3a51f4146ea90a815d0d283d1ceb20b928d8b4d45875e892696986a3c0d8fb9b", |
||||
"81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009", |
||||
) |
||||
} |
||||
|
||||
/* |
||||
The regression test for the following invalid kademlia edge case. |
||||
|
||||
Addresses used in this test are discovered as part of the simulation network |
||||
in higher level tests for streaming. They were generated randomly. |
||||
|
||||
========================================================================= |
||||
Mon Apr 9 18:43:48 UTC 2018 KΛÐΞMLIΛ hive: queen's address: bc7f3b |
||||
population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4 |
||||
000 2 0f49 67ff | 28 0f49 (0) 0211 (0) 07b2 (0) 0703 (0) |
||||
001 2 e84b f3a4 | 13 f3a4 (0) e84b (0) e58b (0) e60b (0) |
||||
002 1 8dba | 1 8dba (0) |
||||
003 2 a008 ad72 | 2 ad72 (0) a008 (0) |
||||
004 0 | 3 b61f (0) b27f (0) b027 (0) |
||||
============ DEPTH: 5 ========================================== |
||||
005 1 ba19 | 1 ba19 (0) |
||||
006 0 | 0 |
||||
007 1 bdd6 | 1 bdd6 (0) |
||||
008 0 | 0 |
||||
009 0 | 0 |
||||
010 0 | 0 |
||||
011 0 | 0 |
||||
012 0 | 0 |
||||
013 0 | 0 |
||||
014 0 | 0 |
||||
015 0 | 0 |
||||
========================================================================= |
||||
*/ |
||||
func TestKademliaCase2(t *testing.T) { |
||||
testKademliaCase(t, |
||||
"bc7f3b6a4a7e3c91b100ca6680b6c06ff407972b88956324ca853295893e0237", "67ffb61d3aa27449d277016188f35f19e2321fbda5008c68cf6303faa080534f", "600cd54c842eadac1729c04abfc369bc244572ca76117105b9dd910283b82730", "d955a05409650de151218557425105a8aa2867bb6a0e0462fa1cf90abcf87ad6", "7a6b726de45abdf7bb3e5fd9fb0dc8932270ca4dedef92238c80c05bcdb570e3", "263e99424ebfdb652adb4e3dcd27d59e11bb7ae1c057b3ef6f390d0228006254", "ba195d1a53aafde68e661c64d39db8c2a73505bf336125c15c3560de3b48b7ed", "3458c762169937115f67cabc35a6c384ed70293a8aec37b077a6c1b8e02d510e", "4ef4dc2e28ac6efdba57e134ac24dd4e0be68b9d54f7006515eb9509105f700c", "2a8782b79b0c24b9714dfd2c8ff1932bebc08aa6520b4eaeaa59ff781238890c", "625d02e960506f4524e9cdeac85b33faf3ea437fceadbd478b62b78720cf24fc", "e051a36a8c8637f520ba259c9ed3fadaf740dadc6a04c3f0e21778ebd4cd6ac4", "e34bc014fa2504f707bb3d904872b56c2fa250bee3cb19a147a0418541f1bd90", "28036dc79add95799916893890add5d8972f3b95325a509d6ded3d448f4dc652", "1b013c407794fa2e4c955d8f51cbc6bd78588a174b6548246b291281304b5409", "34f71b68698e1534095ff23ee9c35bf64c7f12b8463e7c6f6b19c25cf03928b4", "c712c6e9bbb7076832972a95890e340b94ed735935c3c0bb788e61f011b59479", "a008d5becdcda4b9dbfdaafc3cec586cf61dcf2d4b713b6168fff02e3b9f0b08", "29de15555cdbebaab214009e416ee92f947dcec5dab9894129f50f1b17138f34", "5df9449f700bd4b5a23688b68b293f2e92fa6ca524c93bc6bb9936efba9d9ada", "3ab0168a5f87fedc6a39b53c628256ac87a98670d8691bbdaaecec22418d13a2", "1ee299b2d2a74a568494130e6869e66d57982d345c482a0e0eeb285ac219ae3b", "e0e0e3b860cea9b7a74cf1b0675cc632dc64e80a02f20bbc5e96e2e8bb670606", "dc1ba6f169b0fcdcca021dcebaf39fe5d4875e7e69b854fad65687c1d7719ec0", "d321f73e42fcfb1d3a303eddf018ca5dffdcfd5567cd5ec1212f045f6a07e47d", "070320c3da7b542e5ca8aaf6a0a53d2bb5113ed264ab1db2dceee17c729edcb1", "17d314d65fdd136b50d182d2c8f5edf16e7838c2be8cf2c00abe4b406dbcd1d8", "e60b99e0a06f7d2d99d84085f67cdf8cc22a9ae22c339365d80f90289834a2b4", "02115771e18932e1f67a45f11f5bf743c5dae97fbc477d34d35c996012420eac", "3102a40eb2e5060353dd19bf61eeec8782dd1bebfcb57f4c796912252b591827", "8dbaf231062f2dc7ddaba5f9c7761b0c21292be51bf8c2ef503f31d4a2f63f79", "b02787b713c83a9f9183216310f04251994e04c2763a9024731562e8978e7cc4", "b27fe6cd33989e10909ce794c4b0b88feae286b614a59d49a3444c1a7b51ea82", "07b2d2c94fdc6fd148fe23be2ed9eff54f5e12548f29ed8416e6860fc894466f", "e58bf9f451ef62ac44ff0a9bb0610ec0fd14d423235954f0d3695e83017cbfc4", "bdd600b91bb79d1ee0053b854de308cfaa7e2abce575ea6815a0a7b3449609c2", "0f49c93c1edc7999920b21977cedd51a763940dac32e319feb9c1df2da0f3071", "7cbf0297cd41acf655cd6f960d7aaf61479edb4189d5c001cbc730861f0deb41", "79265193778d87ad626a5f59397bc075872d7302a12634ce2451a767d0a82da2", "2fe7d705f7c370b9243dbaafe007d555ff58d218822fca49d347b12a0282457c", "e84bc0c83d05e55a0080eed41dda5a795da4b9313a4da697142e69a65834cbb3", "cc4d278bd9aa0e9fb3cd8d2e0d68fb791aab5de4b120b845c409effbed47a180", "1a2317a8646cd4b6d3c4aa4cc25f676533abb689cf180787db216880a1239ad8", "cbafd6568cf8e99076208e6b6843f5808a7087897c67aad0c54694669398f889", "7b7c8357255fc37b4dae0e1af61589035fd39ff627e0938c6b3da8b4e4ec5d23", "2b8d782c1f5bac46c922cf439f6aa79f91e9ba5ffc0020d58455188a2075b334", "b61f45af2306705740742e76197a119235584ced01ef3f7cf3d4370f6c557cd1", "2775612e7cdae2780bf494c370bdcbe69c55e4a1363b1dc79ea0135e61221cce", "f3a49bb22f40885e961299abfa697a7df690a79f067bf3a4847a3ad48d826c9f", "ad724ac218dc133c0aadf4618eae21fdd0c2f3787af279846b49e2b4f97ff167", |
||||
) |
||||
} |
||||
|
||||
/* |
||||
The regression test for the following invalid kademlia edge case. |
||||
|
||||
Addresses used in this test are discovered as part of the simulation network |
||||
in higher level tests for streaming. They were generated randomly. |
||||
|
||||
========================================================================= |
||||
Mon Apr 9 19:04:35 UTC 2018 KΛÐΞMLIΛ hive: queen's address: b4822e |
||||
population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4 |
||||
000 2 786c 774b | 29 774b (0) 786c (0) 7a79 (0) 7d2f (0) |
||||
001 2 d9de cf19 | 10 cf19 (0) d9de (0) d2ff (0) d2a2 (0) |
||||
002 2 8ca1 8d74 | 5 8d74 (0) 8ca1 (0) 9793 (0) 9f51 (0) |
||||
003 0 | 0 |
||||
004 0 | 3 bfac (0) bcbb (0) bde9 (0) |
||||
005 0 | 0 |
||||
============ DEPTH: 6 ========================================== |
||||
006 1 b660 | 1 b660 (0) |
||||
007 0 | 0 |
||||
008 1 b450 | 1 b450 (0) |
||||
009 0 | 0 |
||||
010 0 | 0 |
||||
011 0 | 0 |
||||
012 0 | 0 |
||||
013 0 | 0 |
||||
014 0 | 0 |
||||
015 0 | 0 |
||||
========================================================================= |
||||
*/ |
||||
func TestKademliaCase3(t *testing.T) { |
||||
testKademliaCase(t, |
||||
"b4822e874a01b94ac3a35c821e6db131e785c2fcbb3556e84b36102caf09b091", "2ecf54ea38d58f9cfc3862e54e5854a7c506fbc640e0b38e46d7d45a19794999", "442374092be50fc7392e8dd3f6fab3158ff7f14f26ff98060aed9b2eecf0b97d", "b450a4a67fcfa3b976cf023d8f1f15052b727f712198ce901630efe2f95db191", "9a7291638eb1c989a6dd6661a42c735b23ac6605b5d3e428aa5ffe650e892c85", "67f62eeab9804cfcac02b25ebeab9113d1b9d03dd5200b1c5a324cc0163e722f", "2e4a0e4b53bca4a9d7e2734150e9f579f29a255ade18a268461b20d026c9ee90", "30dd79c5fcdaa1b106f6960c45c9fde7c046aa3d931088d98c52ab759d0b2ac4", "97936fb5a581e59753c54fa5feec493714f2218245f61f97a62eafd4699433e4", "3a2899b6e129e3e193f6e2aefb82589c948c246d2ec1d4272af32ef3b2660f44", "f0e2a8aa88e67269e9952431ef12e5b29b7f41a1871fbfc38567fad95655d607", "7fa12b3f3c5f8383bfc644b958f72a486969733fa097d8952b3eb4f7b4f73192", "360c167aad5fc992656d6010ec45fdce5bcd492ad9608bc515e2be70d4e430c1", "fe21bc969b3d8e5a64a6484a829c1e04208f26f3cd4de6afcbc172a5bd17f1f1", "b660a1f40141d7ccd282fe5bd9838744119bd1cb3780498b5173578cc5ad308f", "44dcb3370e76680e2fba8cd986ad45ff0b77ca45680ee8d950e47922c4af6226", "8ca126923d17fccb689647307b89f38aa14e2a7b9ebcf3c1e31ccf3d2291a3bc", "f0ae19ae9ce6329327cbf42baf090e084c196b0877d8c7b69997e0123be23ef8", "d2a2a217385158e3e1e348883a14bc423e57daa12077e8c49797d16121ea0810", "f5467ccd85bb4ebe768527db520a210459969a5f1fae6e07b43f519799f0b224", "68be5fd9f9d142a5099e3609011fe3bab7bb992c595999e31e0b3d1668dfb3cf", "4d49a8a476e4934afc6b5c36db9bece3ed1804f20b952da5a21b2b0de766aa73", "ea7155745ef3fb2d099513887a2ba279333ced65c65facbd890ce58bd3fce772", "cf19f51f4e848053d289ac95a9138cdd23fc3077ae913cd58cda8cc7a521b2e1", "590b1cd41c7e6144e76b5cd515a3a4d0a4317624620a3f1685f43ae68bdcd890", "d2ffe0626b5f94a7e00fa0b506e7455a3d9399c15800db108d5e715ef5f6e346", "69630878c50a91f6c2edd23a706bfa0b50bd5661672a37d67bab38e6bca3b698", "445e9067079899bb5faafaca915ae6c0f6b1b730a5a628835dd827636f7feb1e", "6461c77491f1c4825958949f23c153e6e1759a5be53abbcee17c9da3867f3141", "23a235f4083771ccc207771daceda700b525a59ab586788d4f6892e69e34a6e2", "bde99f79ef41a81607ddcf92b9f95dcbc6c3537e91e8bf740e193dc73b19485e", "177957c0e5f0fbd12b88022a91768095d193830986caec8d888097d3ff4310b8", "bcbbdbaa4cdf8352422072f332e05111b732354a35c4d7c617ce1fc3b8b42a5a", "774b6717fdfb0d1629fb9d4c04a9ca40079ae2955d7f82e897477055ed017abb", "16443bf625be6d39ecaa6f114e5d2c1d47a64bfd3c13808d94b55b6b6acef2ee", "8d7495d9008066505ed00ce8198af82bfa5a6b4c08768b4c9fb3aa4eb0b0cca2", "15800849a53349508cb382959527f6c3cf1a46158ff1e6e2316b7dea7967e35f", "7a792f0f4a2b731781d1b244b2a57947f1a2e32900a1c0793449f9f7ae18a7b7", "5e517c2832c9deaa7df77c7bad4d20fd6eda2b7815e155e68bc48238fac1416f", "9f51a14f0019c72bd1d472706d8c80a18c1873c6a0663e754b60eae8094483d7", "7d2fabb565122521d22ba99fed9e5be6a458fbc93156d54db27d97a00b8c3a97", "786c9e412a7db4ec278891fa534caa9a1d1a028c631c6f3aeb9c4d96ad895c36", "3bd6341d40641c2632a5a0cd7a63553a04e251efd7195897a1d27e02a7a8bfde", "31efd1f5fb57b8cff0318d77a1a9e8d67e1d1c8d18ce90f99c3a240dff48cdc8", "d9de3e1156ce1380150948acbcfecd99c96e7f4b0bc97745f4681593d017f74f", "427a2201e09f9583cd990c03b81b58148c297d474a3b50f498d83b1c7a9414cd", "bfaca11596d3dec406a9fcf5d97536516dfe7f0e3b12078428a7e1700e25218a", "351c4770a097248a650008152d0cab5825d048bef770da7f3364f59d1e721bc0", "ee00f205d1486b2be7381d962bd2867263758e880529e4e2bfedfa613bbc0e71", "6aa3b6418d89e3348e4859c823ef4d6d7cd46aa7f7e77aba586c4214d760d8f8", |
||||
) |
||||
} |
||||
|
||||
/* |
||||
The regression test for the following invalid kademlia edge case. |
||||
|
||||
Addresses used in this test are discovered as part of the simulation network |
||||
in higher level tests for streaming. They were generated randomly. |
||||
|
||||
========================================================================= |
||||
Mon Apr 9 19:16:25 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 9a90fe |
||||
population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4 |
||||
000 2 72ef 4e6c | 24 0b1e (0) 0d66 (0) 17f5 (0) 17e8 (0) |
||||
001 2 fc2b fa47 | 13 fa47 (0) fc2b (0) fffd (0) ecef (0) |
||||
002 2 b847 afa8 | 6 afa8 (0) ad77 (0) bb7c (0) b847 (0) |
||||
003 0 | 0 |
||||
004 0 | 4 91fc (0) 957d (0) 9482 (0) 949a (0) |
||||
============ DEPTH: 5 ========================================== |
||||
005 1 9ccf | 1 9ccf (0) |
||||
006 0 | 0 |
||||
007 1 9bb2 | 1 9bb2 (0) |
||||
008 0 | 0 |
||||
009 0 | 0 |
||||
010 0 | 0 |
||||
011 0 | 0 |
||||
012 0 | 0 |
||||
013 0 | 0 |
||||
014 0 | 0 |
||||
015 0 | 0 |
||||
========================================================================= |
||||
*/ |
||||
func TestKademliaCase4(t *testing.T) { |
||||
testKademliaCase(t, |
||||
"9a90fe3506277244549064b8c3276abb06284a199d9063a97331947f2b7da7f4", |
||||
"c19359eddef24b7be1a833b4475f212cd944263627a53f9ef4837d106c247730", "fc2b6fef99ef947f7e57c3df376891769e2a2fd83d2b8e634e0fc1e91eaa080c", "ecefc0e1a8ea7bb4b48c469e077401fce175dd75294255b96c4e54f6a2950a55", "bb7ce598efc056bba343cc2614aa3f67a575557561290b44c73a63f8f433f9f7", "55fbee6ca52dfd7f0be0db969ee8e524b654ab4f0cce7c05d83887d7d2a15460", "afa852b6b319998c6a283cc0c82d2f5b8e9410075d7700f3012761f1cfbd0f76", "36c370cfb63f2087971ba6e58d7585b04e16b8f0da335efb91554c2dd8fe191c", "6be41e029985edebc901fb77fc4fb65516b6d85086e2a98bfa3159c99391e585", "dd3cfc72ea553e7d2b28f0037a65646b30955b929d29ba4c40f4a2a811248e77", "da3a8f18e09c7b0ca235c4e33e1441a5188f1df023138bf207753ee63e768f7d", "de9e3ab4dc572d54a2d4b878329fd832bb51a149f4ce167316eeb177b61e7e01", "4e6c1ecde6ed917706257fe020a1d02d2e9d87fca4c85f0f7b132491008c5032", "72ef04b77a070e13463b3529dd312bcacfb7a12d20dc597f5ec3de0501e9b834", "3fef57186675d524ab8bb1f54ba8cb68610babca1247c0c46dbb60aed003c69d", "1d8e6b71f7a052865d6558d4ba44ad5fab7b908cc1badf5766822e1c20d0d823", "6be2f2b4ffa173014d4ec7df157d289744a2bda54bb876b264ccfa898a0da315", "b0ba3fff8643f9985c744327b0c4c869763509fd5da2de9a80a4a0a082021255", "9ccf40b9406ba2e6567101fb9b4e5334a9ec74263eff47267da266ba45e6c158", "d7347f02c180a448e60f73931845062ce00048750b584790278e9c93ef31ad81", "b68c6359a22b3bee6fecb8804311cfd816648ea31d530c9fb48e477e029d707a", "0d668a18ad7c2820214df6df95a6c855ce19fb1cb765f8ca620e45db76686d37", "3fbd2663bff65533246f1fabb9f38086854c6218aeb3dc9ac6ac73d4f0988f91", "949aa5719ca846052bfaa1b38c97b6eca3df3e24c0e0630042c6bccafbb4cdb5", "77b8a2b917bef5d54f3792183b014cca7798f713ff14fe0b2ac79b4c9f6f996d", "17e853cbd8dc00cba3cd9ffeb36f26a9f41a0eb92f80b62c2cda16771c935388", "5f682ed7a8cf2f98387c3def7c97f9f05ae39e39d393eeca3cf621268d6347f8", "ad77487eaf11fd8084ba4517a51766eb0e5b77dd3492dfa79aa3a2802fb29d20", "d247cfcacf9a8200ebaddf639f8c926ab0a001abe682f40df3785e80ed124e91", "195589442e11907eede1ee6524157f1125f68399f3170c835ff81c603b069f6c", "5b5ca0a67f3c54e7d3a6a862ef56168ec9ed1f4945e6c24de6d336b2be2e6f8c", "56430e4caa253015f1f998dce4a48a88af1953f68e94eca14f53074ae9c3e467", "0b1eed6a5bf612d1d8e08f5c546f3d12e838568fd3aa43ed4c537f10c65545d6", "7058db19a56dfff01988ac4a62e1310597f9c8d7ebde6890dadabf047d722d39", "b847380d6888ff7cd11402d086b19eccc40950b52c9d67e73cb4f8462f5df078", "df6c048419a2290ab546d527e9eeba349e7f7e1759bafe4adac507ce60ef9670", "91fc5b4b24fc3fbfea7f9a3d0f0437cb5733c0c2345d8bdffd7048d6e3b8a37b", "957d8ea51b37523952b6f5ae95462fcd4aed1483ef32cc80b69580aaeee03606", "efa82e4e91ad9ab781977400e9ac0bb9de7389aaedebdae979b73d1d3b8d72b0", "7400c9f3f3fc0cc6fe8cc37ab24b9771f44e9f78be913f73cd35fc4be030d6bd", "9bb28f4122d61f7bb56fe27ef706159fb802fef0f5de9dfa32c9c5b3183235f1", "40a8de6e98953498b806614532ea4abf8b99ad7f9719fb68203a6eae2efa5b2a", "412de0b218b8f7dcacc9205cd16ffb4eca5b838f46a2f4f9f534026061a47308", "17f56ecad51075080680ad9faa0fd8946b824d3296ddb20be07f9809fe8d1c5a", "fffd4e7ae885a41948a342b6647955a7ec8a8039039f510cff467ef597675457", "35e78e11b5ac46a29dd04ab0043136c3291f4ca56cb949ace33111ed56395463", "94824fc80230af82077c83bfc01dc9675b1f9d3d538b1e5f41c21ac753598691", "fa470ae314ca3fce493f21b423eef2a49522e09126f6f2326fa3c9cac0b344f7", "7078860b5b621b21ac7b95f9fc4739c8235ce5066a8b9bd7d938146a34fa88ec", "eea53560f0428bfd2eca4f86a5ce9dec5ff1309129a975d73465c1c9e9da71d1", |
||||
) |
||||
} |
||||
|
||||
/* |
||||
The regression test for the following invalid kademlia edge case. |
||||
|
||||
Addresses used in this test are discovered as part of the simulation network |
||||
in higher level tests for streaming. They were generated randomly. |
||||
|
||||
========================================================================= |
||||
Mon Apr 9 19:25:18 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 5dd5c7 |
||||
population: 13 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4 |
||||
000 2 e528 fad0 | 22 fad0 (0) e528 (0) e3bb (0) ed13 (0) |
||||
001 3 3f30 18e0 1dd3 | 7 3f30 (0) 23db (0) 10b6 (0) 18e0 (0) |
||||
002 4 7c54 7804 61e4 60f9 | 10 61e4 (0) 60f9 (0) 636c (0) 7186 (0) |
||||
003 2 40ae 4bae | 5 4bae (0) 4d5c (0) 403a (0) 40ae (0) |
||||
004 0 | 0 |
||||
005 0 | 3 5808 (0) 5a0e (0) 5bdb (0) |
||||
============ DEPTH: 6 ========================================== |
||||
006 2 5f14 5f61 | 2 5f14 (0) 5f61 (0) |
||||
007 0 | 0 |
||||
008 0 | 0 |
||||
009 0 | 0 |
||||
010 0 | 0 |
||||
011 0 | 0 |
||||
012 0 | 0 |
||||
013 0 | 0 |
||||
014 0 | 0 |
||||
015 0 | 0 |
||||
========================================================================= |
||||
*/ |
||||
func TestKademliaCase5(t *testing.T) { |
||||
testKademliaCase(t, |
||||
"5dd5c77dd9006a800478fcebb02d48d4036389e7d3c8f6a83b97dbad13f4c0a9", |
||||
"78fafa0809929a1279ece089a51d12457c2d8416dff859aeb2ccc24bb50df5ec", "1dd39b1257e745f147cbbc3cadd609ccd6207c41056dbc4254bba5d2527d3ee5", "5f61dd66d4d94aec8fcc3ce0e7885c7edf30c43143fa730e2841c5d28e3cd081", "8aa8b0472cb351d967e575ad05c4b9f393e76c4b01ef4b3a54aac5283b78abc9", "4502f385152a915b438a6726ce3ea9342e7a6db91a23c2f6bee83a885ed7eb82", "718677a504249db47525e959ef1784bed167e1c46f1e0275b9c7b588e28a3758", "7c54c6ed1f8376323896ed3a4e048866410de189e9599dd89bf312ca4adb96b5", "18e03bd3378126c09e799a497150da5c24c895aedc84b6f0dbae41fc4bac081a", "23db76ac9e6e58d9f5395ca78252513a7b4118b4155f8462d3d5eec62486cadc", "40ae0e8f065e96c7adb7fa39505136401f01780481e678d718b7f6dbb2c906ec", "c1539998b8bae19d339d6bbb691f4e9daeb0e86847545229e80fe0dffe716e92", "ed139d73a2699e205574c08722ca9f030ad2d866c662f1112a276b91421c3cb9", "5bdb19584b7a36d09ca689422ef7e6bb681b8f2558a6b2177a8f7c812f631022", "636c9de7fe234ffc15d67a504c69702c719f626c17461d3f2918e924cd9d69e2", "de4455413ff9335c440d52458c6544191bd58a16d85f700c1de53b62773064ea", "de1963310849527acabc7885b6e345a56406a8f23e35e436b6d9725e69a79a83", "a80a50a467f561210a114cba6c7fb1489ed43a14d61a9edd70e2eb15c31f074d", "7804f12b8d8e6e4b375b242058242068a3809385e05df0e64973cde805cf729c", "60f9aa320c02c6f2e6370aa740cf7cea38083fa95fca8c99552cda52935c1520", "d8da963602390f6c002c00ce62a84b514edfce9ebde035b277a957264bb54d21", "8463d93256e026fe436abad44697152b9a56ac8e06a0583d318e9571b83d073c", "9a3f78fcefb9a05e40a23de55f6153d7a8b9d973ede43a380bf46bb3b3847de1", "e3bb576f4b3760b9ca6bff59326f4ebfc4a669d263fb7d67ab9797adea54ed13", "4d5cdbd6dcca5bdf819a0fe8d175dc55cc96f088d37462acd5ea14bc6296bdbe", "5a0ed28de7b5258c727cb85447071c74c00a5fbba9e6bc0393bc51944d04ab2a", "61e4ddb479c283c638f4edec24353b6cc7a3a13b930824aad016b0996ca93c47", "7e3610868acf714836cafaaa7b8c009a9ac6e3a6d443e5586cf661530a204ee2", "d74b244d4345d2c86e30a097105e4fb133d53c578320285132a952cdaa64416e", "cfeed57d0f935bfab89e3f630a7c97e0b1605f0724d85a008bbfb92cb47863a8", "580837af95055670e20d494978f60c7f1458dc4b9e389fc7aa4982b2aca3bce3", "df55c0c49e6c8a83d82dfa1c307d3bf6a20e18721c80d8ec4f1f68dc0a137ced", "5f149c51ce581ba32a285439a806c063ced01ccd4211cd024e6a615b8f216f95", "1eb76b00aeb127b10dd1b7cd4c3edeb4d812b5a658f0feb13e85c4d2b7c6fe06", "7a56ba7c3fb7cbfb5561a46a75d95d7722096b45771ec16e6fa7bbfab0b35dfe", "4bae85ad88c28470f0015246d530adc0cd1778bdd5145c3c6b538ee50c4e04bd", "afd1892e2a7145c99ec0ebe9ded0d3fec21089b277a68d47f45961ec5e39e7e0", "953138885d7b36b0ef79e46030f8e61fd7037fbe5ce9e0a94d728e8c8d7eab86", "de761613ef305e4f628cb6bf97d7b7dc69a9d513dc233630792de97bcda777a6", "3f3087280063d09504c084bbf7fdf984347a72b50d097fd5b086ffabb5b3fb4c", "7d18a94bb1ebfdef4d3e454d2db8cb772f30ca57920dd1e402184a9e598581a0", "a7d6fbdc9126d9f10d10617f49fb9f5474ffe1b229f76b7dd27cebba30eccb5d", "fad0246303618353d1387ec10c09ee991eb6180697ed3470ed9a6b377695203d", "1cf66e09ea51ee5c23df26615a9e7420be2ac8063f28f60a3bc86020e94fe6f3", "8269cdaa153da7c358b0b940791af74d7c651cd4d3f5ed13acfe6d0f2c539e7f", "90d52eaaa60e74bf1c79106113f2599471a902d7b1c39ac1f55b20604f453c09", "9788fd0c09190a3f3d0541f68073a2f44c2fcc45bb97558a7c319f36c25a75b3", "10b68fc44157ecfdae238ee6c1ce0333f906ad04d1a4cb1505c8e35c3c87fbb0", "e5284117fdf3757920475c786e0004cb00ba0932163659a89b36651a01e57394", "403ad51d911e113dcd5f9ff58c94f6d278886a2a4da64c3ceca2083282c92de3", |
||||
) |
||||
} |
@ -1,308 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/contracts/chequebook" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/swarm/network/kademlia" |
||||
"github.com/ethereum/go-ethereum/swarm/services/swap" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
/* |
||||
BZZ protocol Message Types and Message Data Types |
||||
*/ |
||||
|
||||
// bzz protocol message codes
|
||||
const ( |
||||
statusMsg = iota // 0x01
|
||||
storeRequestMsg // 0x02
|
||||
retrieveRequestMsg // 0x03
|
||||
peersMsg // 0x04
|
||||
syncRequestMsg // 0x05
|
||||
deliveryRequestMsg // 0x06
|
||||
unsyncedKeysMsg // 0x07
|
||||
paymentMsg // 0x08
|
||||
) |
||||
|
||||
/* |
||||
Handshake |
||||
|
||||
* Version: 8 byte integer version of the protocol |
||||
* ID: arbitrary byte sequence, a human-readable client identifier |
||||
* Addr: the address advertised by the node, format similar to DEVp2p wire protocol |
||||
* Swap: info for the swarm accounting protocol |
||||
* NetworkID: 8 byte integer network identifier |
||||
* Caps: swarm-specific capabilities, format identical to devp2p |
||||
* SyncState: synchronisation state (db iterator key and address space etc) persisted about the peer |
||||
|
||||
*/ |
||||
type statusMsgData struct { |
||||
Version uint64 |
||||
ID string |
||||
Addr *peerAddr |
||||
Swap *swap.SwapProfile |
||||
NetworkId uint64 |
||||
} |
||||
|
||||
func (self *statusMsgData) String() string { |
||||
return fmt.Sprintf("Status: Version: %v, ID: %v, Addr: %v, Swap: %v, NetworkId: %v", self.Version, self.ID, self.Addr, self.Swap, self.NetworkId) |
||||
} |
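A minimal, illustrative-only sketch of building the handshake payload described above; it would live in this (legacy) package, and all concrete values are invented for the example:

func exampleStatus() {
	status := &statusMsgData{
		Version:   0, // protocol version, arbitrary here
		ID:        "example-client/v0.1",
		Addr:      &peerAddr{IP: net.ParseIP("127.0.0.1"), Port: 30399},
		NetworkId: 3,
	}
	fmt.Println(status) // rendered via the String method above
}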
||||
|
||||
/* |
||||
store requests are forwarded to the peers in their kademlia proximity bin |
||||
if they are distant |
||||
if they are within our storage radius or have any incentive to store it |
||||
then attach your nodeID to the metadata |
||||
if the storage request is sufficiently close (within our proxLimit, i.e., the |
||||
last row of the routing table) |
||||
*/ |
||||
type storeRequestMsgData struct { |
||||
Key storage.Key // hash of datasize | data
|
||||
SData []byte // the actual chunk Data
|
||||
// optional
|
||||
Id uint64 // request ID. If this is a delivery, the ID is the retrieve request ID
|
||||
requestTimeout *time.Time // expiry for forwarding - [not serialised][not currently used]
|
||||
storageTimeout *time.Time // expiry of content - [not serialised][not currently used]
|
||||
from *peer // [not serialised] protocol registers the requester
|
||||
} |
||||
|
||||
func (self storeRequestMsgData) String() string { |
||||
var from string |
||||
if self.from == nil { |
||||
from = "self" |
||||
} else { |
||||
from = self.from.Addr().String() |
||||
} |
||||
end := len(self.SData) |
||||
if len(self.SData) > 10 { |
||||
end = 10 |
||||
} |
||||
return fmt.Sprintf("from: %v, Key: %v; ID: %v, requestTimeout: %v, storageTimeout: %v, SData %x", from, self.Key, self.Id, self.requestTimeout, self.storageTimeout, self.SData[:end]) |
||||
} |
||||
|
||||
/* |
||||
Retrieve request |
||||
|
||||
Timeout in milliseconds. Note that zero timeout retrieval requests do not request forwarding, but prompt for a peers message response. Therefore they also serve |
||||
as messages to retrieve peers. |
||||
|
||||
MaxSize specifies the maximum size that the peer will accept. This is useful in |
||||
particular if we allow storage and delivery of a multichunk payload representing |
||||
the entire or partial subtree unfolding from the requested root key. |
||||
So when we are only interested in a limited part of a stream (infinite trees) or are only |
||||
testing chunk availability etc., we can indicate it by limiting the size here. |
||||
|
||||
Request ID can be newly generated or kept from the request originator. |
||||
If the request ID is missing or zero, the request is handled as a lookup only |
||||
prompting a peers response but not launching a search. Lookup requests are meant |
||||
to be used to bootstrap kademlia tables. |
||||
|
||||
In the special case that the key is the zero value as well, the remote peer's |
||||
address is assumed (the message is to be handled as a self lookup request). |
||||
The response is a PeersMsg with the peers in the kademlia proximity bin |
||||
corresponding to the address. |
||||
*/ |
||||
|
||||
type retrieveRequestMsgData struct { |
||||
Key storage.Key // target Key address of chunk to be retrieved
|
||||
Id uint64 // request id, request is a lookup if missing or zero
|
||||
MaxSize uint64 // maximum size of delivery accepted
|
||||
MaxPeers uint64 // maximum number of peers returned
|
||||
Timeout uint64 // the longest time we are expecting a response
|
||||
timeout *time.Time // [not serialised]
|
||||
from *peer //
|
||||
} |
||||
|
||||
func (self *retrieveRequestMsgData) String() string { |
||||
var from string |
||||
if self.from == nil { |
||||
from = "ourselves" |
||||
} else { |
||||
from = self.from.Addr().String() |
||||
} |
||||
var target []byte |
||||
if len(self.Key) > 3 { |
||||
target = self.Key[:4] |
||||
} |
||||
return fmt.Sprintf("from: %v, Key: %x; ID: %v, MaxSize: %v, MaxPeers: %d", from, target, self.Id, self.MaxSize, self.MaxPeers) |
||||
} |
||||
|
||||
// lookups are encoded by missing request ID
|
||||
func (self *retrieveRequestMsgData) isLookup() bool { |
||||
return self.Id == 0 |
||||
} |
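A tiny illustrative sketch of the two degenerate requests described in the comment block above (values arbitrary; it assumes storage.Key is a byte-slice type, as its use elsewhere in this file suggests):

func exampleLookups() {
	lookup := &retrieveRequestMsgData{Key: storage.Key(make([]byte, 32))} // Id == 0: lookup only, prompts a peers response, no search
	selfLookup := &retrieveRequestMsgData{}                               // Id and Key both zero: treated as a self lookup
	fmt.Println(lookup.isLookup(), selfLookup.isLookup())                 // true true, since Id == 0 in both
}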
||||
|
||||
// sets timeout fields
|
||||
func (self *retrieveRequestMsgData) setTimeout(t *time.Time) { |
||||
self.timeout = t |
||||
if t != nil { |
||||
self.Timeout = uint64(t.UnixNano()) |
||||
} else { |
||||
self.Timeout = 0 |
||||
} |
||||
} |
||||
|
||||
func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) { |
||||
if self.Timeout > 0 && self.timeout == nil { |
||||
timeout := time.Unix(int64(self.Timeout), 0) |
||||
t = &timeout |
||||
self.timeout = t |
||||
} |
||||
return |
||||
} |
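One thing to note about the pair above: setTimeout stores t.UnixNano() (nanoseconds) in the serialised Timeout field, while getTimeout rebuilds the time with time.Unix(int64(self.Timeout), 0), which interprets the same value as seconds, so a round trip does not reproduce the original instant. A hedged sketch of a nanosecond-consistent reading, assuming the wire field is indeed meant to carry UnixNano():

// illustrative only: reconstruct the timeout treating Timeout as nanoseconds
func (self *retrieveRequestMsgData) getTimeoutNano() *time.Time {
	if self.Timeout > 0 && self.timeout == nil {
		timeout := time.Unix(0, int64(self.Timeout)) // (sec, nsec)
		self.timeout = &timeout
	}
	return self.timeout
}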
||||
|
||||
// peerAddr is sent in StatusMsg as part of the handshake
|
||||
type peerAddr struct { |
||||
IP net.IP |
||||
Port uint16 |
||||
ID []byte // the 64 byte NodeID (ECDSA Public Key)
|
||||
Addr kademlia.Address |
||||
} |
||||
|
||||
// peerAddr pretty prints as enode
|
||||
func (self *peerAddr) String() string { |
||||
var nodeid discover.NodeID |
||||
copy(nodeid[:], self.ID) |
||||
return discover.NewNode(nodeid, self.IP, 0, self.Port).String() |
||||
} |
||||
|
||||
/* |
||||
peers Msg is one response to retrieval; it is always encouraged after a retrieval |
||||
request to respond with a list of peers in the same kademlia proximity bin. |
||||
The encoding of a peer is identical to that in the devp2p base protocol peers |
||||
messages: [IP, Port, NodeID] |
||||
Note that a node's DPA address is not the NodeID but the hash of the NodeID. |
||||
|
||||
Timeout serves to indicate whether the responder is forwarding the query within |
||||
the timeout or not. |
||||
|
||||
NodeID serves as the owner of payment contracts and signer of proofs of transfer. |
||||
|
||||
The Key is the target (if response to a retrieval request) or missing (zero value) |
||||
peers address (hash of NodeID) if retrieval request was a self lookup. |
||||
|
||||
Peers message is requested by retrieval requests with a missing or zero value request ID |
||||
*/ |
||||
type peersMsgData struct { |
||||
Peers []*peerAddr //
|
||||
Timeout uint64 //
|
||||
timeout *time.Time // indicate whether responder is expected to deliver content
|
||||
Key storage.Key // present if a response to a retrieval request
|
||||
Id uint64 // present if a response to a retrieval request
|
||||
from *peer |
||||
} |
||||
|
||||
// peers msg pretty printer
|
||||
func (self *peersMsgData) String() string { |
||||
var from string |
||||
if self.from == nil { |
||||
from = "ourselves" |
||||
} else { |
||||
from = self.from.Addr().String() |
||||
} |
||||
var target []byte |
||||
if len(self.Key) > 3 { |
||||
target = self.Key[:4] |
||||
} |
||||
return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers) |
||||
} |
||||
|
||||
func (self *peersMsgData) setTimeout(t *time.Time) { |
||||
self.timeout = t |
||||
if t != nil { |
||||
self.Timeout = uint64(t.UnixNano()) |
||||
} else { |
||||
self.Timeout = 0 |
||||
} |
||||
} |
||||
|
||||
/* |
||||
syncRequest |
||||
|
||||
is sent after the handshake to initiate syncing |
||||
the syncState of the remote node is persisted in kaddb and set on the |
||||
peer/protocol instance when the node is registered by hive as online |
||||
*/ |
||||
|
||||
type syncRequestMsgData struct { |
||||
SyncState *syncState `rlp:"nil"` |
||||
} |
||||
|
||||
func (self *syncRequestMsgData) String() string { |
||||
return fmt.Sprintf("%v", self.SyncState) |
||||
} |
||||
|
||||
/* |
||||
deliveryRequest |
||||
|
||||
is sent once a batch of sync keys is filtered. The ones not found are |
||||
sent as a list of syncRequest (hash, priority) entries in the Deliver field. |
||||
When the source receives the sync request it continues to iterate |
||||
and fetch at most N items as yet unsynced. |
||||
At the same time it responds with deliveries of the items. |
||||
*/ |
||||
type deliveryRequestMsgData struct { |
||||
Deliver []*syncRequest |
||||
} |
||||
|
||||
func (self *deliveryRequestMsgData) String() string { |
||||
return fmt.Sprintf("sync request for new chunks\ndelivery request for %v chunks", len(self.Deliver)) |
||||
} |
||||
|
||||
/* |
||||
unsyncedKeys |
||||
|
||||
is sent first after the handshake if the SyncState iterator brings up keys (possibly hundreds or thousands), |
||||
and subsequently sent as a response to deliveryRequestMsgData. |
||||
|
||||
Syncing is the iterative process of exchanging unsyncedKeys and deliveryRequestMsgs |
||||
both ways. |
||||
|
||||
State contains the sync state sent by the source. When the source receives the |
||||
sync state it continues to iterate and fetch at most N items as yet unsynced. |
||||
At the same time it responds with deliveries of the items. |
||||
*/ |
||||
type unsyncedKeysMsgData struct { |
||||
Unsynced []*syncRequest |
||||
State *syncState |
||||
} |
||||
|
||||
func (self *unsyncedKeysMsgData) String() string { |
||||
return fmt.Sprintf("sync: keys of %d new chunks (state %v) => synced: %v", len(self.Unsynced), self.State, self.State.Synced) |
||||
} |
||||
|
||||
/* |
||||
payment |
||||
|
||||
is sent when the swap balance is tilted in favour of the remote peer |
||||
and in absolute units exceeds the PayAt parameter in the remote peer's profile |
||||
*/ |
||||
|
||||
type paymentMsgData struct { |
||||
Units uint // units actually paid for (checked against amount by swap)
|
||||
Promise *chequebook.Cheque // payment with cheque
|
||||
} |
||||
|
||||
func (self *paymentMsgData) String() string { |
||||
return fmt.Sprintf("payment for %d units: %v", self.Units, self.Promise) |
||||
} |
@ -0,0 +1,111 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package priorityqueue implements a channel-based priority queue
|
||||
// over arbitrary types. It provides
|
||||
// an autopop loop applying a function to the items, always respecting
|
||||
// their priority. The structure is only quasi-consistent, i.e., if a lower
|
||||
// priority item is autopopped, it is guaranteed that there was a point
|
||||
// when no higher priority item was present, i.e., it is not guaranteed
|
||||
// that there was any point where the lower priority item was present
|
||||
// but the higher was not
|
||||
|
||||
package priorityqueue |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
) |
||||
|
||||
var ( |
||||
errContention = errors.New("queue contention") |
||||
errBadPriority = errors.New("bad priority") |
||||
|
||||
wakey = struct{}{} |
||||
) |
||||
|
||||
// PriorityQueue is the basic structure
|
||||
type PriorityQueue struct { |
||||
queues []chan interface{} |
||||
wakeup chan struct{} |
||||
} |
||||
|
||||
// New is the constructor for PriorityQueue
|
||||
func New(n int, l int) *PriorityQueue { |
||||
var queues = make([]chan interface{}, n) |
||||
for i := range queues { |
||||
queues[i] = make(chan interface{}, l) |
||||
} |
||||
return &PriorityQueue{ |
||||
queues: queues, |
||||
wakeup: make(chan struct{}, 1), |
||||
} |
||||
} |
||||
|
||||
// Run is a forever loop popping items from the queues
|
||||
func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) { |
||||
top := len(pq.queues) - 1 |
||||
p := top |
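// p scans from the highest priority queue downwards; after each pop it is
// reset to top so that higher priority items are always drained first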
||||
READ: |
||||
for { |
||||
q := pq.queues[p] |
||||
select { |
||||
case <-ctx.Done(): |
||||
return |
||||
case x := <-q: |
||||
f(x) |
||||
p = top |
||||
default: |
||||
if p > 0 { |
||||
p-- |
||||
continue READ |
||||
} |
||||
p = top |
||||
select { |
||||
case <-ctx.Done(): |
||||
return |
||||
case <-pq.wakeup: |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Push pushes an item to the appropriate queue specified in the priority argument
|
||||
// if context is given it waits until either the item is pushed or the Context aborts
|
||||
// otherwise returns errContention if the queue is full
|
||||
func (pq *PriorityQueue) Push(ctx context.Context, x interface{}, p int) error { |
||||
if p < 0 || p >= len(pq.queues) { |
||||
return errBadPriority |
||||
} |
||||
if ctx == nil { |
||||
select { |
||||
case pq.queues[p] <- x: |
||||
default: |
||||
return errContention |
||||
} |
||||
} else { |
||||
select { |
||||
case pq.queues[p] <- x: |
||||
case <-ctx.Done(): |
||||
return ctx.Err() |
||||
} |
||||
} |
||||
select { |
||||
case pq.wakeup <- wakey: |
||||
default: |
||||
} |
||||
return nil |
||||
} |
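For illustration, a minimal usage sketch of this queue. The values and priorities below are hypothetical, and the import path assumes the package lives at swarm/network/priorityqueue as listed in CODEOWNERS:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
)

func main() {
	pq := priorityqueue.New(3, 2) // three priority levels, each buffered to 2 items
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go pq.Run(ctx, func(v interface{}) {
		fmt.Println("popped:", v) // higher priority items are always drained first
	})
	// without a context, Push fails fast if the target queue is full
	_ = pq.Push(nil, "low", 0)
	// with a context, Push blocks until the item is queued or the context is done
	_ = pq.Push(context.Background(), "high", 2)
	time.Sleep(100 * time.Millisecond) // give the autopop loop time to run
}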
@ -0,0 +1,97 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
package priorityqueue |
||||
|
||||
import ( |
||||
"context" |
||||
"sync" |
||||
"testing" |
||||
) |
||||
|
||||
func TestPriorityQueue(t *testing.T) { |
||||
var results []string |
||||
wg := sync.WaitGroup{} |
||||
pq := New(3, 2) |
||||
wg.Add(1) |
||||
go pq.Run(context.Background(), func(v interface{}) { |
||||
results = append(results, v.(string)) |
||||
wg.Done() |
||||
}) |
||||
pq.Push(context.Background(), "2.0", 2) |
||||
wg.Wait() |
||||
if results[0] != "2.0" { |
||||
t.Errorf("expected first result %q, got %q", "2.0", results[0]) |
||||
} |
||||
|
||||
Loop: |
||||
for i, tc := range []struct { |
||||
priorities []int |
||||
values []string |
||||
results []string |
||||
errors []error |
||||
}{ |
||||
{ |
||||
priorities: []int{0}, |
||||
values: []string{""}, |
||||
results: []string{""}, |
||||
}, |
||||
{ |
||||
priorities: []int{0, 1}, |
||||
values: []string{"0.0", "1.0"}, |
||||
results: []string{"1.0", "0.0"}, |
||||
}, |
||||
{ |
||||
priorities: []int{1, 0}, |
||||
values: []string{"1.0", "0.0"}, |
||||
results: []string{"1.0", "0.0"}, |
||||
}, |
||||
{ |
||||
priorities: []int{0, 1, 1}, |
||||
values: []string{"0.0", "1.0", "1.1"}, |
||||
results: []string{"1.0", "1.1", "0.0"}, |
||||
}, |
||||
{ |
||||
priorities: []int{0, 0, 0}, |
||||
values: []string{"0.0", "0.0", "0.1"}, |
||||
errors: []error{nil, nil, errContention}, |
||||
}, |
||||
} { |
||||
var results []string |
||||
wg := sync.WaitGroup{} |
||||
pq := New(3, 2) |
||||
wg.Add(len(tc.values)) |
||||
for j, value := range tc.values { |
||||
err := pq.Push(nil, value, tc.priorities[j]) |
||||
if tc.errors != nil && err != tc.errors[j] { |
||||
t.Errorf("expected push error %v, got %v", tc.errors[j], err) |
||||
continue Loop |
||||
} |
||||
if err != nil { |
||||
continue Loop |
||||
} |
||||
} |
||||
go pq.Run(context.Background(), func(v interface{}) { |
||||
results = append(results, v.(string)) |
||||
wg.Done() |
||||
}) |
||||
wg.Wait() |
||||
for k, result := range tc.results { |
||||
if results[k] != result { |
||||
t.Errorf("test case %v: expected %v element %q, got %q", i, k, result, results[k]) |
||||
} |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,17 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package discovery |
@ -0,0 +1,586 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package discovery |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"errors" |
||||
"flag" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"math/rand" |
||||
"os" |
||||
"path" |
||||
"strings" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
colorable "github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
// serviceName is used with the exec adapter so the exec'd binary knows which
|
||||
// service to execute
|
||||
const serviceName = "discovery" |
||||
const testMinProxBinSize = 2 |
||||
const discoveryPersistenceDatadir = "discovery_persistence_test_store" |
||||
|
||||
var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir) |
||||
var discoveryEnabled = true |
||||
var persistenceEnabled = false |
||||
|
||||
var services = adapters.Services{ |
||||
serviceName: newService, |
||||
} |
||||
|
||||
func cleanDbStores() error { |
||||
entries, err := ioutil.ReadDir(os.TempDir()) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
for _, f := range entries { |
||||
if strings.HasPrefix(f.Name(), discoveryPersistenceDatadir) { |
||||
os.RemoveAll(path.Join(os.TempDir(), f.Name())) |
||||
} |
||||
} |
||||
return nil |
||||
|
||||
} |
||||
|
||||
func getDbStore(nodeID string) (*state.DBStore, error) { |
||||
if _, err := os.Stat(discoveryPersistencePath + "_" + nodeID); os.IsNotExist(err) { |
||||
log.Info(fmt.Sprintf("directory for nodeID %s does not exist. creating...", nodeID)) |
||||
ioutil.TempDir("", discoveryPersistencePath+"_"+nodeID) |
||||
} |
||||
log.Info(fmt.Sprintf("opening storage directory for nodeID %s", nodeID)) |
||||
store, err := state.NewDBStore(discoveryPersistencePath + "_" + nodeID) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return store, nil |
||||
} |
||||
|
||||
var ( |
||||
nodeCount = flag.Int("nodes", 10, "number of nodes to create (default 10)") |
||||
initCount = flag.Int("conns", 1, "number of originally connected peers (default 1)") |
||||
snapshotFile = flag.String("snapshot", "", "create snapshot") |
||||
loglevel = flag.Int("loglevel", 3, "verbosity of logs") |
||||
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs") |
||||
) |
||||
|
||||
func init() { |
||||
flag.Parse() |
||||
// register the discovery service which will run as a devp2p
|
||||
// protocol when using the exec adapter
|
||||
adapters.RegisterServices(services) |
||||
|
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog)))) |
||||
} |
||||
|
||||
// Benchmarks to test the average time it takes for an N-node ring
|
||||
// to reach a healthy kademlia topology
|
||||
func BenchmarkDiscovery_8_1(b *testing.B) { benchmarkDiscovery(b, 8, 1) } |
||||
func BenchmarkDiscovery_16_1(b *testing.B) { benchmarkDiscovery(b, 16, 1) } |
||||
func BenchmarkDiscovery_32_1(b *testing.B) { benchmarkDiscovery(b, 32, 1) } |
||||
func BenchmarkDiscovery_64_1(b *testing.B) { benchmarkDiscovery(b, 64, 1) } |
||||
func BenchmarkDiscovery_128_1(b *testing.B) { benchmarkDiscovery(b, 128, 1) } |
||||
func BenchmarkDiscovery_256_1(b *testing.B) { benchmarkDiscovery(b, 256, 1) } |
||||
|
||||
func BenchmarkDiscovery_8_2(b *testing.B) { benchmarkDiscovery(b, 8, 2) } |
||||
func BenchmarkDiscovery_16_2(b *testing.B) { benchmarkDiscovery(b, 16, 2) } |
||||
func BenchmarkDiscovery_32_2(b *testing.B) { benchmarkDiscovery(b, 32, 2) } |
||||
func BenchmarkDiscovery_64_2(b *testing.B) { benchmarkDiscovery(b, 64, 2) } |
||||
func BenchmarkDiscovery_128_2(b *testing.B) { benchmarkDiscovery(b, 128, 2) } |
||||
func BenchmarkDiscovery_256_2(b *testing.B) { benchmarkDiscovery(b, 256, 2) } |
||||
|
||||
func BenchmarkDiscovery_8_4(b *testing.B) { benchmarkDiscovery(b, 8, 4) } |
||||
func BenchmarkDiscovery_16_4(b *testing.B) { benchmarkDiscovery(b, 16, 4) } |
||||
func BenchmarkDiscovery_32_4(b *testing.B) { benchmarkDiscovery(b, 32, 4) } |
||||
func BenchmarkDiscovery_64_4(b *testing.B) { benchmarkDiscovery(b, 64, 4) } |
||||
func BenchmarkDiscovery_128_4(b *testing.B) { benchmarkDiscovery(b, 128, 4) } |
||||
func BenchmarkDiscovery_256_4(b *testing.B) { benchmarkDiscovery(b, 256, 4) } |
||||
|
||||
func TestDiscoverySimulationDockerAdapter(t *testing.T) { |
||||
testDiscoverySimulationDockerAdapter(t, *nodeCount, *initCount) |
||||
} |
||||
|
||||
func testDiscoverySimulationDockerAdapter(t *testing.T, nodes, conns int) { |
||||
adapter, err := adapters.NewDockerAdapter() |
||||
if err != nil { |
||||
if err == adapters.ErrLinuxOnly { |
||||
t.Skip(err) |
||||
} else { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
testDiscoverySimulation(t, nodes, conns, adapter) |
||||
} |
||||
|
||||
func TestDiscoverySimulationExecAdapter(t *testing.T) { |
||||
testDiscoverySimulationExecAdapter(t, *nodeCount, *initCount) |
||||
} |
||||
|
||||
func testDiscoverySimulationExecAdapter(t *testing.T, nodes, conns int) { |
||||
baseDir, err := ioutil.TempDir("", "swarm-test") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer os.RemoveAll(baseDir) |
||||
testDiscoverySimulation(t, nodes, conns, adapters.NewExecAdapter(baseDir)) |
||||
} |
||||
|
||||
func TestDiscoverySimulationSimAdapter(t *testing.T) { |
||||
testDiscoverySimulationSimAdapter(t, *nodeCount, *initCount) |
||||
} |
||||
|
||||
func TestDiscoveryPersistenceSimulationSimAdapter(t *testing.T) { |
||||
testDiscoveryPersistenceSimulationSimAdapter(t, *nodeCount, *initCount) |
||||
} |
||||
|
||||
func testDiscoveryPersistenceSimulationSimAdapter(t *testing.T, nodes, conns int) { |
||||
testDiscoveryPersistenceSimulation(t, nodes, conns, adapters.NewSimAdapter(services)) |
||||
} |
||||
|
||||
func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) { |
||||
testDiscoverySimulation(t, nodes, conns, adapters.NewSimAdapter(services)) |
||||
} |
||||
|
||||
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) { |
||||
startedAt := time.Now() |
||||
result, err := discoverySimulation(nodes, conns, adapter) |
||||
if err != nil { |
||||
t.Fatalf("Setting up simulation failed: %v", err) |
||||
} |
||||
if result.Error != nil { |
||||
t.Fatalf("Simulation failed: %s", result.Error) |
||||
} |
||||
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt)) |
||||
var min, max time.Duration |
||||
var sum int |
||||
for _, pass := range result.Passes { |
||||
duration := pass.Sub(result.StartedAt) |
||||
if sum == 0 || duration < min { |
||||
min = duration |
||||
} |
||||
if duration > max { |
||||
max = duration |
||||
} |
||||
sum += int(duration.Nanoseconds()) |
||||
} |
||||
t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond) |
||||
finishedAt := time.Now() |
||||
t.Logf("Setup: %s, shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt)) |
||||
} |
||||
|
||||
func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte { |
||||
persistenceEnabled = true |
||||
discoveryEnabled = true |
||||
|
||||
result, err := discoveryPersistenceSimulation(nodes, conns, adapter) |
||||
|
||||
if err != nil { |
||||
t.Fatalf("Setting up simulation failed: %v", err) |
||||
} |
||||
if result.Error != nil { |
||||
t.Fatalf("Simulation failed: %s", result.Error) |
||||
} |
||||
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt)) |
||||
// set the discovery and persistence flags again to default so other
|
||||
// tests will not be affected
|
||||
discoveryEnabled = true |
||||
persistenceEnabled = false |
||||
return nil |
||||
} |
||||
|
||||
func benchmarkDiscovery(b *testing.B, nodes, conns int) { |
||||
for i := 0; i < b.N; i++ { |
||||
result, err := discoverySimulation(nodes, conns, adapters.NewSimAdapter(services)) |
||||
if err != nil { |
||||
b.Fatalf("setting up simulation failed: %v", err) |
||||
} |
||||
if result.Error != nil { |
||||
b.Logf("simulation failed: %s", result.Error) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) { |
||||
// create network
|
||||
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ |
||||
ID: "0", |
||||
DefaultService: serviceName, |
||||
}) |
||||
defer net.Shutdown() |
||||
trigger := make(chan discover.NodeID) |
||||
ids := make([]discover.NodeID, nodes) |
||||
for i := 0; i < nodes; i++ { |
||||
conf := adapters.RandomNodeConfig() |
||||
node, err := net.NewNodeWithConfig(conf) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error starting node: %s", err) |
||||
} |
||||
if err := net.Start(node.ID()); err != nil { |
||||
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
if err := triggerChecks(trigger, net, node.ID()); err != nil { |
||||
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
ids[i] = node.ID() |
||||
} |
||||
|
||||
// run a simulation which connects the nodes in a ring and waits
|
||||
// for full peer discovery
|
||||
var addrs [][]byte |
||||
action := func(ctx context.Context) error { |
||||
return nil |
||||
} |
||||
wg := sync.WaitGroup{} |
||||
for i := range ids { |
||||
// collect the overlay addresses, to construct the peer pot below
|
||||
addrs = append(addrs, network.ToOverlayAddr(ids[i].Bytes())) |
||||
for j := 0; j < conns; j++ { |
||||
var k int |
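// the first connection goes to the next node in the ring; any further
// connections for this node are made to randomly chosen peers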
||||
if j == 0 { |
||||
k = (i + 1) % len(ids) |
||||
} else { |
||||
k = rand.Intn(len(ids)) |
||||
} |
||||
wg.Add(1) |
||||
go func(i, k int) { |
||||
defer wg.Done() |
||||
net.Connect(ids[i], ids[k]) |
||||
}(i, k) |
||||
} |
||||
} |
||||
wg.Wait() |
||||
log.Debug(fmt.Sprintf("nodes: %v", len(addrs))) |
||||
// construct the peer pot, so that kademlia health can be checked
|
||||
ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs) |
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) { |
||||
select { |
||||
case <-ctx.Done(): |
||||
return false, ctx.Err() |
||||
default: |
||||
} |
||||
|
||||
node := net.GetNode(id) |
||||
if node == nil { |
||||
return false, fmt.Errorf("unknown node: %s", id) |
||||
} |
||||
client, err := node.Client() |
||||
if err != nil { |
||||
return false, fmt.Errorf("error getting node client: %s", err) |
||||
} |
||||
healthy := &network.Health{} |
||||
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes())) |
||||
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil { |
||||
return false, fmt.Errorf("error getting node health: %s", err) |
||||
} |
||||
log.Debug(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v\n%v", id, healthy.GotNN, healthy.KnowNN, healthy.Full, healthy.Hive)) |
||||
return healthy.KnowNN && healthy.GotNN && healthy.Full, nil |
||||
} |
||||
|
||||
// 64 nodes ~ 1min
|
||||
// 128 nodes ~
|
||||
timeout := 300 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ |
||||
Action: action, |
||||
Trigger: trigger, |
||||
Expect: &simulations.Expectation{ |
||||
Nodes: ids, |
||||
Check: check, |
||||
}, |
||||
}) |
||||
if result.Error != nil { |
||||
return result, nil |
||||
} |
||||
|
||||
if *snapshotFile != "" { |
||||
snap, err := net.Snapshot() |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error creating snapshot: %v", err) |
||||
} |
||||
jsonsnapshot, err := json.Marshal(snap) |
||||
if err != nil { |
||||
return nil, fmt.Errorf("corrupt json snapshot: %v", err) |
||||
} |
||||
log.Info("writing snapshot", "file", *snapshotFile) |
||||
err = ioutil.WriteFile(*snapshotFile, jsonsnapshot, 0755) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
return result, nil |
||||
} |
||||
|
||||
func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) { |
||||
cleanDbStores() |
||||
defer cleanDbStores() |
||||
|
||||
// create network
|
||||
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ |
||||
ID: "0", |
||||
DefaultService: serviceName, |
||||
}) |
||||
defer net.Shutdown() |
||||
trigger := make(chan discover.NodeID) |
||||
ids := make([]discover.NodeID, nodes) |
||||
var addrs [][]byte |
||||
|
||||
for i := 0; i < nodes; i++ { |
||||
conf := adapters.RandomNodeConfig() |
||||
node, err := net.NewNodeWithConfig(conf) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
if err != nil { |
||||
return nil, fmt.Errorf("error starting node: %s", err) |
||||
} |
||||
if err := net.Start(node.ID()); err != nil { |
||||
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
if err := triggerChecks(trigger, net, node.ID()); err != nil { |
||||
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
ids[i] = node.ID() |
||||
a := network.ToOverlayAddr(ids[i].Bytes()) |
||||
|
||||
addrs = append(addrs, a) |
||||
} |
||||
|
||||
// run a simulation which connects the nodes in a ring and waits
|
||||
// for full peer discovery
|
||||
ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs) |
||||
|
||||
var restartTime time.Time |
||||
|
||||
action := func(ctx context.Context) error { |
||||
ticker := time.NewTicker(500 * time.Millisecond) |
||||
|
||||
for range ticker.C { |
||||
isHealthy := true |
||||
for _, id := range ids { |
||||
//call Healthy RPC
|
||||
node := net.GetNode(id) |
||||
if node == nil { |
||||
return fmt.Errorf("unknown node: %s", id) |
||||
} |
||||
client, err := node.Client() |
||||
if err != nil { |
||||
return fmt.Errorf("error getting node client: %s", err) |
||||
} |
||||
healthy := &network.Health{} |
||||
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes())) |
||||
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil { |
||||
return fmt.Errorf("error getting node health: %s", err) |
||||
} |
||||
|
||||
log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", id.String(), healthy.GotNN && healthy.KnowNN && healthy.Full)) |
||||
if !healthy.GotNN || !healthy.Full { |
||||
isHealthy = false |
||||
break |
||||
} |
||||
} |
||||
if isHealthy { |
||||
break |
||||
} |
||||
} |
||||
ticker.Stop() |
||||
|
||||
log.Info("reached healthy kademlia. starting to shutdown nodes.") |
||||
shutdownStarted := time.Now() |
||||
// stop all ids, then start them again
|
||||
for _, id := range ids { |
||||
node := net.GetNode(id) |
||||
|
||||
if err := net.Stop(node.ID()); err != nil { |
||||
return fmt.Errorf("error stopping node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
} |
||||
log.Info(fmt.Sprintf("shutting down nodes took: %s", time.Since(shutdownStarted))) |
||||
persistenceEnabled = true |
||||
discoveryEnabled = false |
||||
restartTime = time.Now() |
||||
for _, id := range ids { |
||||
node := net.GetNode(id) |
||||
if err := net.Start(node.ID()); err != nil { |
||||
return fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
if err := triggerChecks(trigger, net, node.ID()); err != nil { |
||||
return fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err) |
||||
} |
||||
} |
||||
|
||||
log.Info(fmt.Sprintf("restarting nodes took: %s", time.Since(restartTime))) |
||||
|
||||
return nil |
||||
} |
||||
wg := sync.WaitGroup{} |
||||
//connect each node to conns successive nodes in the ring
|
||||
for i := range ids { |
||||
for j := 1; j <= conns; j++ { |
||||
k := (i + j) % len(ids) |
||||
if k == i { |
||||
k = (k + 1) % len(ids) |
||||
} |
||||
wg.Add(1) |
||||
go func(i, k int) { |
||||
defer wg.Done() |
||||
net.Connect(ids[i], ids[k]) |
||||
}(i, k) |
||||
} |
||||
} |
||||
wg.Wait() |
||||
log.Debug(fmt.Sprintf("nodes: %v", len(addrs))) |
||||
// construct the peer pot, so that kademlia health can be checked
|
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) { |
||||
select { |
||||
case <-ctx.Done(): |
||||
return false, ctx.Err() |
||||
default: |
||||
} |
||||
|
||||
node := net.GetNode(id) |
||||
if node == nil { |
||||
return false, fmt.Errorf("unknown node: %s", id) |
||||
} |
||||
client, err := node.Client() |
||||
if err != nil { |
||||
return false, fmt.Errorf("error getting node client: %s", err) |
||||
} |
||||
healthy := &network.Health{} |
||||
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes())) |
||||
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil { |
||||
return false, fmt.Errorf("error getting node health: %s", err) |
||||
} |
||||
log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v", id, healthy.GotNN, healthy.KnowNN, healthy.Full)) |
||||
|
||||
return healthy.KnowNN && healthy.GotNN && healthy.Full, nil |
||||
} |
||||
|
||||
// 64 nodes ~ 1min
|
||||
// 128 nodes ~
|
||||
timeout := 300 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{ |
||||
Action: action, |
||||
Trigger: trigger, |
||||
Expect: &simulations.Expectation{ |
||||
Nodes: ids, |
||||
Check: check, |
||||
}, |
||||
}) |
||||
if result.Error != nil { |
||||
return result, nil |
||||
} |
||||
|
||||
return result, nil |
||||
} |
||||
|
||||
// triggerChecks triggers a simulation step check whenever a peer is added or
|
||||
// removed from the given node, and also every second to avoid a race between
|
||||
// peer events and kademlia becoming healthy
|
||||
func triggerChecks(trigger chan discover.NodeID, net *simulations.Network, id discover.NodeID) error { |
||||
node := net.GetNode(id) |
||||
if node == nil { |
||||
return fmt.Errorf("unknown node: %s", id) |
||||
} |
||||
client, err := node.Client() |
||||
if err != nil { |
||||
return err |
||||
} |
||||
events := make(chan *p2p.PeerEvent) |
||||
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents") |
||||
if err != nil { |
||||
return fmt.Errorf("error getting peer events for node %v: %s", id, err) |
||||
} |
||||
go func() { |
||||
defer sub.Unsubscribe() |
||||
|
||||
tick := time.NewTicker(time.Second) |
||||
defer tick.Stop() |
||||
|
||||
for { |
||||
select { |
||||
case <-events: |
||||
trigger <- id |
||||
case <-tick.C: |
||||
trigger <- id |
||||
case err := <-sub.Err(): |
||||
if err != nil { |
||||
log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err) |
||||
} |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
return nil |
||||
} |
||||
|
||||
func newService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
host := adapters.ExternalIP() |
||||
|
||||
addr := network.NewAddrFromNodeIDAndPort(ctx.Config.ID, host, ctx.Config.Port) |
||||
|
||||
kp := network.NewKadParams() |
||||
kp.MinProxBinSize = testMinProxBinSize |
||||
|
||||
if ctx.Config.Reachable != nil { |
||||
kp.Reachable = func(o network.OverlayAddr) bool { |
||||
return ctx.Config.Reachable(o.(*network.BzzAddr).ID()) |
||||
} |
||||
} |
||||
kad := network.NewKademlia(addr.Over(), kp) |
||||
hp := network.NewHiveParams() |
||||
hp.KeepAliveInterval = time.Duration(200) * time.Millisecond |
||||
hp.Discovery = discoveryEnabled |
||||
|
||||
log.Info(fmt.Sprintf("discovery for nodeID %s is %t", ctx.Config.ID.String(), hp.Discovery)) |
||||
|
||||
config := &network.BzzConfig{ |
||||
OverlayAddr: addr.Over(), |
||||
UnderlayAddr: addr.Under(), |
||||
HiveParams: hp, |
||||
} |
||||
|
||||
if persistenceEnabled { |
||||
log.Info(fmt.Sprintf("persistence enabled for nodeID %s", ctx.Config.ID.String())) |
||||
store, err := getDbStore(ctx.Config.ID.String()) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return network.NewBzz(config, kad, store, nil, nil), nil |
||||
} |
||||
|
||||
return network.NewBzz(config, kad, nil, nil, nil), nil |
||||
} |
File diff suppressed because one or more lines are too long
@ -0,0 +1,144 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// You can run this simulation using
|
||||
//
|
||||
// go run ./swarm/network/simulations/overlay.go
|
||||
package main |
||||
|
||||
import ( |
||||
"flag" |
||||
"fmt" |
||||
"net/http" |
||||
"runtime" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
colorable "github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
var ( |
||||
noDiscovery = flag.Bool("no-discovery", false, "disable discovery (useful if you want to load a snapshot)") |
||||
vmodule = flag.String("vmodule", "", "log filters for logger via Vmodule") |
||||
verbosity = flag.Int("verbosity", 0, "verbosity of logs") |
||||
httpSimPort = 8888 |
||||
) |
||||
|
||||
func init() { |
||||
flag.Parse() |
||||
//initialize the logger
|
||||
//this is a demonstration of how to use Vmodule for filtering logs
|
||||
//provide -vmodule as param, and comma-separated values, e.g.:
|
||||
//-vmodule overlay_test.go=4,simulations=3
|
||||
//the above example sets overlay_test.go logs to level 4 and packages ending with "simulations" to level 3
|
||||
if *vmodule != "" { |
||||
//only enable the pattern matching handler if the flag has been provided
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))) |
||||
if *verbosity > 0 { |
||||
glogger.Verbosity(log.Lvl(*verbosity)) |
||||
} |
||||
glogger.Vmodule(*vmodule) |
||||
log.Root().SetHandler(glogger) |
||||
} |
||||
} |
||||
|
||||
type Simulation struct { |
||||
mtx sync.Mutex |
||||
stores map[discover.NodeID]*state.InmemoryStore |
||||
} |
||||
|
||||
func NewSimulation() *Simulation { |
||||
return &Simulation{ |
||||
stores: make(map[discover.NodeID]*state.InmemoryStore), |
||||
} |
||||
} |
||||
|
||||
func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
id := ctx.Config.ID |
||||
s.mtx.Lock() |
||||
store, ok := s.stores[id] |
||||
if !ok { |
||||
store = state.NewInmemoryStore() |
||||
s.stores[id] = store |
||||
} |
||||
s.mtx.Unlock() |
||||
|
||||
addr := network.NewAddrFromNodeID(id) |
||||
|
||||
kp := network.NewKadParams() |
||||
kp.MinProxBinSize = 2 |
||||
kp.MaxBinSize = 4 |
||||
kp.MinBinSize = 1 |
||||
kp.MaxRetries = 1000 |
||||
kp.RetryExponent = 2 |
||||
kp.RetryInterval = 1000000 |
||||
kad := network.NewKademlia(addr.Over(), kp) |
||||
hp := network.NewHiveParams() |
||||
hp.Discovery = !*noDiscovery |
||||
hp.KeepAliveInterval = 300 * time.Millisecond |
||||
|
||||
config := &network.BzzConfig{ |
||||
OverlayAddr: addr.Over(), |
||||
UnderlayAddr: addr.Under(), |
||||
HiveParams: hp, |
||||
} |
||||
|
||||
return network.NewBzz(config, kad, store, nil, nil), nil |
||||
} |
||||
|
||||
//create the simulation network
|
||||
func newSimulationNetwork() *simulations.Network { |
||||
|
||||
s := NewSimulation() |
||||
services := adapters.Services{ |
||||
"overlay": s.NewService, |
||||
} |
||||
adapter := adapters.NewSimAdapter(services) |
||||
simNetwork := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ |
||||
DefaultService: "overlay", |
||||
}) |
||||
return simNetwork |
||||
} |
||||
|
||||
//return a new http server
|
||||
func newOverlaySim(sim *simulations.Network) *simulations.Server { |
||||
return simulations.NewServer(sim) |
||||
} |
||||
|
||||
// var server
|
||||
func main() { |
||||
//cpu optimization
|
||||
runtime.GOMAXPROCS(runtime.NumCPU()) |
||||
//run the sim
|
||||
runOverlaySim() |
||||
} |
||||
|
||||
func runOverlaySim() { |
||||
//create the simulation network
|
||||
net := newSimulationNetwork() |
||||
//create a http server with it
|
||||
sim := newOverlaySim(net) |
||||
log.Info(fmt.Sprintf("starting simulation server on 0.0.0.0:%d...", httpSimPort)) |
||||
//start the HTTP server
|
||||
http.ListenAndServe(fmt.Sprintf(":%d", httpSimPort), sim) |
||||
} |
@ -0,0 +1,195 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
package main |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/json" |
||||
"fmt" |
||||
"io/ioutil" |
||||
"net/http" |
||||
"net/http/httptest" |
||||
"net/url" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
) |
||||
|
||||
var ( |
||||
nodeCount = 16 |
||||
) |
||||
|
||||
//This test is used to test the overlay simulation.
|
||||
//As the simulation is executed via a main function, breakage is easily missed when the code changes
|
||||
//An automated test will prevent that
|
||||
//The test just connects to the simulations, starts the network,
|
||||
//starts the mocker, gets the number of nodes, and stops it again.
|
||||
//It also provides documentation of the steps needed by frontends
|
||||
//to use the simulations
|
||||
func TestOverlaySim(t *testing.T) { |
||||
t.Skip("Test is flaky, see: https://github.com/ethersphere/go-ethereum/issues/592") |
||||
//start the simulation
|
||||
log.Info("Start simulation backend") |
||||
//get the simulation network; needed to subscribe for up events
|
||||
net := newSimulationNetwork() |
||||
//create the overlay simulation
|
||||
sim := newOverlaySim(net) |
||||
//create a http test server with it
|
||||
srv := httptest.NewServer(sim) |
||||
defer srv.Close() |
||||
|
||||
log.Debug("Http simulation server started. Start simulation network") |
||||
//start the simulation network (initialization of simulation)
|
||||
resp, err := http.Post(srv.URL+"/start", "application/json", nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer resp.Body.Close() |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Fatalf("Expected Status Code %d, got %d", http.StatusOK, resp.StatusCode) |
||||
} |
||||
|
||||
log.Debug("Start mocker") |
||||
//start the mocker, needs a node count and an ID
|
||||
resp, err = http.PostForm(srv.URL+"/mocker/start", |
||||
url.Values{ |
||||
"node-count": {fmt.Sprintf("%d", nodeCount)}, |
||||
"mocker-type": {simulations.GetMockerList()[0]}, |
||||
}) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer resp.Body.Close() |
||||
if resp.StatusCode != http.StatusOK { |
||||
reason, err := ioutil.ReadAll(resp.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
t.Fatalf("Expected Status Code %d, got %d, response body %s", http.StatusOK, resp.StatusCode, string(reason)) |
||||
} |
||||
|
||||
//variables needed to wait for nodes being up
|
||||
var upCount int |
||||
trigger := make(chan discover.NodeID) |
||||
|
||||
//wait for all nodes to be up
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) |
||||
defer cancel() |
||||
|
||||
//start watching node up events...
|
||||
go watchSimEvents(net, ctx, trigger) |
||||
|
||||
//...and wait until all expected up events (nodeCount) have been received
|
||||
LOOP: |
||||
for { |
||||
select { |
||||
case <-trigger: |
||||
//new node up event received, increase counter
|
||||
upCount++ |
||||
//all expected node up events received
|
||||
if upCount == nodeCount { |
||||
break LOOP |
||||
} |
||||
case <-ctx.Done(): |
||||
t.Fatalf("Timed out waiting for up events") |
||||
} |
||||
|
||||
} |
||||
|
||||
//at this point we can query the server
|
||||
log.Info("Get number of nodes") |
||||
//get the number of nodes
|
||||
resp, err = http.Get(srv.URL + "/nodes") |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
defer resp.Body.Close() |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Fatalf("err %s", resp.Status) |
||||
} |
||||
b, err := ioutil.ReadAll(resp.Body) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//unmarshal number of nodes from JSON response
|
||||
var nodesArr []simulations.Node |
||||
err = json.Unmarshal(b, &nodesArr) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
//check if number of nodes received is same as sent
|
||||
if len(nodesArr) != nodeCount { |
||||
t.Fatal(fmt.Errorf("expected %d nodes, got %d", nodeCount, len(nodesArr))) |
||||
} |
||||
|
||||
//need to let it run for a little while, otherwise stopping it immediately can crash due to running nodes
|
||||
//wanting to connect to already stopped nodes
|
||||
time.Sleep(1 * time.Second) |
||||
|
||||
log.Info("Stop the network") |
||||
//stop the network
|
||||
resp, err = http.Post(srv.URL+"/stop", "application/json", nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer resp.Body.Close() |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Fatalf("err %s", resp.Status) |
||||
} |
||||
|
||||
log.Info("Reset the network") |
||||
//reset the network (removes all nodes and connections)
|
||||
resp, err = http.Post(srv.URL+"/reset", "application/json", nil) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
defer resp.Body.Close() |
||||
if resp.StatusCode != http.StatusOK { |
||||
t.Fatalf("err %s", resp.Status) |
||||
} |
||||
} |
||||
|
||||
//watch for events so we know when all nodes are up
|
||||
func watchSimEvents(net *simulations.Network, ctx context.Context, trigger chan discover.NodeID) { |
||||
events := make(chan *simulations.Event) |
||||
sub := net.Events().Subscribe(events) |
||||
defer sub.Unsubscribe() |
||||
|
||||
for { |
||||
select { |
||||
case ev := <-events: |
||||
//only catch node up events
|
||||
if ev.Type == simulations.EventTypeNode { |
||||
if ev.Node.Up { |
||||
log.Debug("got node up event", "event", ev, "node", ev.Node.Config.ID) |
||||
select { |
||||
case trigger <- ev.Node.Config.ID: |
||||
case <-ctx.Done(): |
||||
return |
||||
} |
||||
} |
||||
} |
||||
case <-ctx.Done(): |
||||
return |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,449 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream |
||||
|
||||
import ( |
||||
"context" |
||||
"encoding/binary" |
||||
"errors" |
||||
"flag" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"sync/atomic" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock" |
||||
"github.com/ethereum/go-ethereum/swarm/storage/mock/db" |
||||
colorable "github.com/mattn/go-colorable" |
||||
) |
||||
|
||||
var ( |
||||
deliveries map[discover.NodeID]*Delivery |
||||
stores map[discover.NodeID]storage.ChunkStore |
||||
toAddr func(discover.NodeID) *network.BzzAddr |
||||
peerCount func(discover.NodeID) int |
||||
adapter = flag.String("adapter", "sim", "type of simulation: sim|exec|docker") |
||||
loglevel = flag.Int("loglevel", 2, "verbosity of logs") |
||||
nodes = flag.Int("nodes", 0, "number of nodes") |
||||
chunks = flag.Int("chunks", 0, "number of chunks") |
||||
useMockStore = flag.Bool("mockstore", false, "use mock global store (default: disabled)") |
||||
) |
||||
|
||||
var ( |
||||
defaultSkipCheck bool |
||||
waitPeerErrC chan error |
||||
chunkSize = 4096 |
||||
registries map[discover.NodeID]*TestRegistry |
||||
createStoreFunc func(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) |
||||
getRetrieveFunc = defaultRetrieveFunc |
||||
subscriptionCount = 0 |
||||
globalStore mock.GlobalStorer |
||||
globalStoreDir string |
||||
) |
||||
|
||||
var services = adapters.Services{ |
||||
"streamer": NewStreamerService, |
||||
"intervalsStreamer": newIntervalsStreamerService, |
||||
} |
||||
|
||||
func init() { |
||||
flag.Parse() |
||||
// register the Delivery service which will run as a devp2p
|
||||
// protocol when using the exec adapter
|
||||
adapters.RegisterServices(services) |
||||
|
||||
log.PrintOrigins(true) |
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) |
||||
} |
||||
|
||||
func createGlobalStore() { |
||||
var err error |
||||
globalStoreDir, err = ioutil.TempDir("", "global.store") |
||||
if err != nil { |
||||
log.Error("Error initiating global store temp directory!", "err", err) |
||||
return |
||||
} |
||||
globalStore, err = db.NewGlobalStore(globalStoreDir) |
||||
if err != nil { |
||||
log.Error("Error initiating global store!", "err", err) |
||||
} |
||||
} |
||||
|
||||
// NewStreamerService creates a stream Registry (with swarm syncer server and client registered) wrapped in a TestRegistry, for use by simulation nodes
|
||||
func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
var err error |
||||
id := ctx.Config.ID |
||||
addr := toAddr(id) |
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams()) |
||||
stores[id], err = createStoreFunc(id, addr) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
store := stores[id].(*storage.LocalStore) |
||||
db := storage.NewDBAPI(store) |
||||
delivery := NewDelivery(kad, db) |
||||
deliveries[id] = delivery |
||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ |
||||
SkipCheck: defaultSkipCheck, |
||||
DoRetrieve: false, |
||||
}) |
||||
RegisterSwarmSyncerServer(r, db) |
||||
RegisterSwarmSyncerClient(r, db) |
||||
go func() { |
||||
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id)) |
||||
}() |
||||
fileStore := storage.NewFileStore(storage.NewNetStore(store, getRetrieveFunc(id)), storage.NewFileStoreParams()) |
||||
testRegistry := &TestRegistry{Registry: r, fileStore: fileStore} |
||||
registries[id] = testRegistry |
||||
return testRegistry, nil |
||||
} |
||||
|
||||
func defaultRetrieveFunc(id discover.NodeID) func(chunk *storage.Chunk) error { |
||||
return nil |
||||
} |
||||
|
||||
func datadirsCleanup() { |
||||
for _, id := range ids { |
||||
os.RemoveAll(datadirs[id]) |
||||
} |
||||
if globalStoreDir != "" { |
||||
os.RemoveAll(globalStoreDir) |
||||
} |
||||
} |
||||
|
||||
//local stores need to be cleaned up after the sim is done
|
||||
func localStoreCleanup() { |
||||
log.Info("Cleaning up...") |
||||
for _, id := range ids { |
||||
registries[id].Close() |
||||
stores[id].Close() |
||||
} |
||||
log.Info("Local store cleanup done") |
||||
} |
||||
|
||||
func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) { |
||||
// setup
|
||||
addr := network.RandomAddr() // tested peer's address
|
||||
to := network.NewKademlia(addr.OAddr, network.NewKadParams()) |
||||
|
||||
// temp datadir
|
||||
datadir, err := ioutil.TempDir("", "streamer") |
||||
if err != nil { |
||||
return nil, nil, nil, func() {}, err |
||||
} |
||||
removeDataDir := func() { |
||||
os.RemoveAll(datadir) |
||||
} |
||||
|
||||
params := storage.NewDefaultLocalStoreParams() |
||||
params.Init(datadir) |
||||
params.BaseKey = addr.Over() |
||||
|
||||
localStore, err := storage.NewTestLocalStoreForAddr(params) |
||||
if err != nil { |
||||
return nil, nil, nil, removeDataDir, err |
||||
} |
||||
|
||||
db := storage.NewDBAPI(localStore) |
||||
delivery := NewDelivery(to, db) |
||||
streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ |
||||
SkipCheck: defaultSkipCheck, |
||||
}) |
||||
teardown := func() { |
||||
streamer.Close() |
||||
removeDataDir() |
||||
} |
||||
protocolTester := p2ptest.NewProtocolTester(t, network.NewNodeIDFromAddr(addr), 1, streamer.runProtocol) |
||||
|
||||
err = waitForPeers(streamer, 1*time.Second, 1) |
||||
if err != nil { |
||||
return nil, nil, nil, teardown, errors.New("timeout: peer is not created") |
||||
} |
||||
|
||||
return protocolTester, streamer, localStore, teardown, nil |
||||
} |
||||
|
||||
func waitForPeers(streamer *Registry, timeout time.Duration, expectedPeers int) error { |
||||
ticker := time.NewTicker(10 * time.Millisecond) |
||||
timeoutTimer := time.NewTimer(timeout) |
||||
for { |
||||
select { |
||||
case <-ticker.C: |
||||
if streamer.peersCount() >= expectedPeers { |
||||
return nil |
||||
} |
||||
case <-timeoutTimer.C: |
||||
return errors.New("timeout") |
||||
} |
||||
} |
||||
} |
||||
|
||||
type roundRobinStore struct { |
||||
index uint32 |
||||
stores []storage.ChunkStore |
||||
} |
||||
|
||||
func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore { |
||||
return &roundRobinStore{ |
||||
stores: stores, |
||||
} |
||||
} |
||||
|
||||
func (rrs *roundRobinStore) Get(addr storage.Address) (*storage.Chunk, error) { |
||||
return nil, errors.New("get not well defined on round robin store") |
||||
} |
||||
|
||||
func (rrs *roundRobinStore) Put(chunk *storage.Chunk) { |
||||
i := atomic.AddUint32(&rrs.index, 1) |
||||
idx := int(i) % len(rrs.stores) |
||||
rrs.stores[idx].Put(chunk) |
||||
} |
||||
|
||||
func (rrs *roundRobinStore) Close() { |
||||
for _, store := range rrs.stores { |
||||
store.Close() |
||||
} |
||||
} |
||||
|
||||
type TestRegistry struct { |
||||
*Registry |
||||
fileStore *storage.FileStore |
||||
} |
||||
|
||||
func (r *TestRegistry) APIs() []rpc.API { |
||||
a := r.Registry.APIs() |
||||
a = append(a, rpc.API{ |
||||
Namespace: "stream", |
||||
Version: "3.0", |
||||
Service: r, |
||||
Public: true, |
||||
}) |
||||
return a |
||||
} |
||||
|
||||
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) { |
||||
r, _ := fileStore.Retrieve(hash) |
||||
buf := make([]byte, 1024) |
||||
var n int |
||||
var total int64 |
||||
var err error |
||||
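// keep reading 1KB windows with ReadAt until a read returns no bytes or an
// error; io.EOF is expected at the end and is not treated as a failure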
for (total == 0 || n > 0) && err == nil { |
||||
n, err = r.ReadAt(buf, total) |
||||
total += int64(n) |
||||
} |
||||
if err != nil && err != io.EOF { |
||||
return total, err |
||||
} |
||||
return total, nil |
||||
} |
||||
|
||||
func (r *TestRegistry) ReadAll(hash common.Hash) (int64, error) { |
||||
return readAll(r.fileStore, hash[:]) |
||||
} |
||||
|
||||
func (r *TestRegistry) Start(server *p2p.Server) error { |
||||
return r.Registry.Start(server) |
||||
} |
||||
|
||||
func (r *TestRegistry) Stop() error { |
||||
return r.Registry.Stop() |
||||
} |
||||
|
||||
type TestExternalRegistry struct { |
||||
*Registry |
||||
} |
||||
|
||||
func (r *TestExternalRegistry) APIs() []rpc.API { |
||||
a := r.Registry.APIs() |
||||
a = append(a, rpc.API{ |
||||
Namespace: "stream", |
||||
Version: "3.0", |
||||
Service: r, |
||||
Public: true, |
||||
}) |
||||
return a |
||||
} |
||||
|
||||
func (r *TestExternalRegistry) GetHashes(ctx context.Context, peerId discover.NodeID, s Stream) (*rpc.Subscription, error) { |
||||
peer := r.getPeer(peerId) |
||||
|
||||
client, err := peer.getClient(ctx, s) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
c := client.Client.(*testExternalClient) |
||||
|
||||
notifier, supported := rpc.NotifierFromContext(ctx) |
||||
if !supported { |
||||
return nil, fmt.Errorf("Subscribe not supported") |
||||
} |
||||
|
||||
sub := notifier.CreateSubscription() |
||||
|
||||
go func() { |
||||
// if we begin sending events immediately, some events
|
||||
// will probably be dropped since the subscription ID might not be sent to
|
||||
// the client.
|
||||
// ref: rpc/subscription_test.go#L65
|
||||
time.Sleep(1 * time.Second) |
||||
for { |
||||
select { |
||||
case h := <-c.hashes: |
||||
<-c.enableNotificationsC // wait for notification subscription to complete
|
||||
if err := notifier.Notify(sub.ID, h); err != nil { |
||||
log.Warn(fmt.Sprintf("rpc sub notifier notify stream %s: %v", s, err)) |
||||
} |
||||
case err := <-sub.Err(): |
||||
if err != nil { |
||||
log.Warn(fmt.Sprintf("caught subscription error in stream %s: %v", s, err)) |
||||
} |
||||
case <-notifier.Closed(): |
||||
log.Trace(fmt.Sprintf("rpc sub notifier closed")) |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
|
||||
return sub, nil |
||||
} |
||||
|
||||
func (r *TestExternalRegistry) EnableNotifications(peerId discover.NodeID, s Stream) error { |
||||
peer := r.getPeer(peerId) |
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) |
||||
defer cancel() |
||||
|
||||
client, err := peer.getClient(ctx, s) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
close(client.Client.(*testExternalClient).enableNotificationsC) |
||||
|
||||
return nil |
||||
} |
||||
|
||||
// TODO: merge functionalities of testExternalClient and testExternalServer
|
||||
// with testClient and testServer.
|
||||
|
||||
type testExternalClient struct { |
||||
hashes chan []byte |
||||
db *storage.DBAPI |
||||
enableNotificationsC chan struct{} |
||||
} |
||||
|
||||
func newTestExternalClient(db *storage.DBAPI) *testExternalClient { |
||||
return &testExternalClient{ |
||||
hashes: make(chan []byte), |
||||
db: db, |
||||
enableNotificationsC: make(chan struct{}), |
||||
} |
||||
} |
||||
|
||||
func (c *testExternalClient) NeedData(hash []byte) func() { |
||||
chunk, _ := c.db.GetOrCreateRequest(hash) |
||||
if chunk.ReqC == nil { |
||||
return nil |
||||
} |
||||
c.hashes <- hash |
||||
return func() { |
||||
chunk.WaitToStore() |
||||
} |
||||
} |
||||
|
||||
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) { |
||||
return nil |
||||
} |
||||
|
||||
func (c *testExternalClient) Close() {} |
||||
|
||||
const testExternalServerBatchSize = 10 |
||||
|
||||
type testExternalServer struct { |
||||
t string |
||||
keyFunc func(key []byte, index uint64) |
||||
sessionAt uint64 |
||||
maxKeys uint64 |
||||
streamer *TestExternalRegistry |
||||
} |
||||
|
||||
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer { |
||||
if keyFunc == nil { |
||||
keyFunc = binary.BigEndian.PutUint64 |
||||
} |
||||
return &testExternalServer{ |
||||
t: t, |
||||
keyFunc: keyFunc, |
||||
sessionAt: sessionAt, |
||||
maxKeys: maxKeys, |
||||
} |
||||
} |
||||
|
||||
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) { |
||||
if from == 0 && to == 0 { |
||||
from = s.sessionAt |
||||
to = s.sessionAt + testExternalServerBatchSize |
||||
} |
||||
if to-from > testExternalServerBatchSize { |
||||
to = from + testExternalServerBatchSize - 1 |
||||
} |
||||
if from >= s.maxKeys && to > s.maxKeys { |
||||
return nil, 0, 0, nil, io.EOF |
||||
} |
||||
if to > s.maxKeys { |
||||
to = s.maxKeys |
||||
} |
||||
b := make([]byte, HashSize*(to-from+1)) |
||||
for i := from; i <= to; i++ { |
||||
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i) |
||||
} |
||||
return b, from, to, nil, nil |
||||
} |
||||
|
||||
func (s *testExternalServer) GetData([]byte) ([]byte, error) { |
||||
return make([]byte, 4096), nil |
||||
} |
||||
|
||||
func (s *testExternalServer) Close() {} |
||||
|
||||
// setDefaultSkipCheck sets the global value defaultSkipCheck.
|
||||
// It should be used in test function defer to reset the global value
|
||||
// to the original value.
|
||||
//
|
||||
// defer setDefaultSkipCheck(defaultSkipCheck)
|
||||
// defaultSkipCheck = skipCheck
|
||||
//
|
||||
// This works because deferred function arguments are evaluated as usual,
|
||||
// but only the function body invocation is deferred.
|
||||
func setDefaultSkipCheck(skipCheck bool) { |
||||
defaultSkipCheck = skipCheck |
||||
} |
@ -0,0 +1,272 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream |
||||
|
||||
import ( |
||||
"errors" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
const ( |
||||
swarmChunkServerStreamName = "RETRIEVE_REQUEST" |
||||
deliveryCap = 32 |
||||
) |
||||
|
||||
var ( |
||||
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil) |
||||
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil) |
||||
|
||||
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil) |
||||
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil) |
||||
) |
||||
|
||||
type Delivery struct { |
||||
db *storage.DBAPI |
||||
overlay network.Overlay |
||||
receiveC chan *ChunkDeliveryMsg |
||||
getPeer func(discover.NodeID) *Peer |
||||
} |
||||
|
||||
func NewDelivery(overlay network.Overlay, db *storage.DBAPI) *Delivery { |
||||
d := &Delivery{ |
||||
db: db, |
||||
overlay: overlay, |
||||
receiveC: make(chan *ChunkDeliveryMsg, deliveryCap), |
||||
} |
||||
|
||||
go d.processReceivedChunks() |
||||
return d |
||||
} |
||||
|
||||
// SwarmChunkServer implements Server
|
||||
type SwarmChunkServer struct { |
||||
deliveryC chan []byte |
||||
batchC chan []byte |
||||
db *storage.DBAPI |
||||
currentLen uint64 |
||||
quit chan struct{} |
||||
} |
||||
|
||||
// NewSwarmChunkServer is SwarmChunkServer constructor
|
||||
func NewSwarmChunkServer(db *storage.DBAPI) *SwarmChunkServer { |
||||
s := &SwarmChunkServer{ |
||||
deliveryC: make(chan []byte, deliveryCap), |
||||
batchC: make(chan []byte), |
||||
db: db, |
||||
quit: make(chan struct{}), |
||||
} |
||||
go s.processDeliveries() |
||||
return s |
||||
} |
||||
|
||||
// processDeliveries handles delivered chunk hashes
|
||||
func (s *SwarmChunkServer) processDeliveries() { |
||||
var hashes []byte |
||||
var batchC chan []byte |
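// batchC stays nil (disabling its send case) until at least one hash has been
// collected; after a batch is handed over it is reset to nil again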
||||
for { |
||||
select { |
||||
case <-s.quit: |
||||
return |
||||
case hash := <-s.deliveryC: |
||||
hashes = append(hashes, hash...) |
||||
batchC = s.batchC |
||||
case batchC <- hashes: |
||||
hashes = nil |
||||
batchC = nil |
||||
} |
||||
} |
||||
} |
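// Note on the select above: a send on a nil channel blocks forever, so the
// batchC case stays disabled until at least one hash has been buffered, and
// is disabled again once a batch has been handed over. The helper below is an
// illustrative sketch of that gating pattern only; the name is hypothetical
// and it is not used anywhere in this package.
func nilChannelGateExample(in <-chan []byte, out chan<- []byte, quit <-chan struct{}) {
	var buf []byte
	var send chan<- []byte // nil: the send case below cannot fire yet
	for {
		select {
		case b := <-in:
			buf = append(buf, b...)
			send = out // enable the send case now that data is buffered
		case send <- buf:
			buf = nil
			send = nil // disable again until new data arrives
		case <-quit:
			return
		}
	}
}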
||||
|
||||
// SetNextBatch returns the next batch of hashes queued for delivery together
// with the corresponding from/to offsets; it blocks until a batch is available
// or the server quits.
|
||||
func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) { |
||||
select { |
||||
case hashes = <-s.batchC: |
||||
case <-s.quit: |
||||
return |
||||
} |
||||
|
||||
from = s.currentLen |
||||
s.currentLen += uint64(len(hashes)) |
||||
to = s.currentLen |
||||
return |
||||
} |
||||
|
||||
// Close needs to be called on a stream server
|
||||
func (s *SwarmChunkServer) Close() { |
||||
close(s.quit) |
||||
} |
||||
|
||||
// GetData retrieves chunk data from the db store
|
||||
func (s *SwarmChunkServer) GetData(key []byte) ([]byte, error) { |
||||
chunk, err := s.db.Get(storage.Address(key)) |
||||
if err == storage.ErrFetching { |
||||
<-chunk.ReqC |
||||
} else if err != nil { |
||||
return nil, err |
||||
} |
||||
return chunk.SData, nil |
||||
} |
||||
|
||||
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
|
||||
type RetrieveRequestMsg struct { |
||||
Addr storage.Address |
||||
SkipCheck bool |
||||
} |
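// As exercised by the exchanges in delivery_test.go, a retrieve request
// travels as message code 5. The responding peer either queues the chunk hash
// on its RETRIEVE_REQUEST stream server and answers with an OfferedHashesMsg
// (code 1), or, when SkipCheck is set, delivers the chunk directly with a
// ChunkDeliveryMsg (code 6).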
||||
|
||||
func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) error { |
||||
log.Trace("received request", "peer", sp.ID(), "hash", req.Addr) |
||||
handleRetrieveRequestMsgCount.Inc(1) |
||||
|
||||
s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
streamer := s.Server.(*SwarmChunkServer) |
||||
chunk, created := d.db.GetOrCreateRequest(req.Addr) |
||||
if chunk.ReqC != nil { |
||||
if created { |
||||
if err := d.RequestFromPeers(chunk.Addr[:], true, sp.ID()); err != nil { |
||||
log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err) |
||||
chunk.SetErrored(storage.ErrChunkForward) |
||||
return nil |
||||
} |
||||
} |
||||
go func() { |
||||
t := time.NewTimer(10 * time.Minute) |
||||
defer t.Stop() |
||||
|
||||
log.Debug("waiting delivery", "peer", sp.ID(), "hash", req.Addr, "node", common.Bytes2Hex(d.overlay.BaseAddr()), "created", created) |
||||
start := time.Now() |
||||
select { |
||||
case <-chunk.ReqC: |
||||
log.Debug("retrieve request ReqC closed", "peer", sp.ID(), "hash", req.Addr, "time", time.Since(start)) |
||||
case <-t.C: |
||||
log.Debug("retrieve request timeout", "peer", sp.ID(), "hash", req.Addr) |
||||
chunk.SetErrored(storage.ErrChunkTimeout) |
||||
return |
||||
} |
||||
chunk.SetErrored(nil) |
||||
|
||||
if req.SkipCheck { |
||||
err := sp.Deliver(chunk, s.priority) |
||||
if err != nil { |
||||
log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err) |
||||
sp.Drop(err) |
||||
} |
||||
} |
||||
streamer.deliveryC <- chunk.Addr[:] |
||||
}() |
||||
return nil |
||||
} |
||||
// TODO: call the retrieve function of the outgoing syncer
|
||||
if req.SkipCheck { |
||||
log.Trace("deliver", "peer", sp.ID(), "hash", chunk.Addr) |
||||
if length := len(chunk.SData); length < 9 { |
||||
log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr) |
||||
} |
||||
return sp.Deliver(chunk, s.priority) |
||||
} |
||||
streamer.deliveryC <- chunk.Addr[:] |
||||
return nil |
||||
} |
||||
|
||||
type ChunkDeliveryMsg struct { |
||||
Addr storage.Address |
||||
SData []byte // the stored chunk Data (incl size)
|
||||
peer *Peer // set in handleChunkDeliveryMsg
|
||||
} |
||||
|
||||
func (d *Delivery) handleChunkDeliveryMsg(sp *Peer, req *ChunkDeliveryMsg) error { |
||||
req.peer = sp |
||||
d.receiveC <- req |
||||
return nil |
||||
} |
||||
|
||||
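// processReceivedChunks stores delivered chunks in the local db store,
// skipping chunks that are already present or not currently being fetched,
// and drops the delivering peer if a stored chunk fails validation.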
func (d *Delivery) processReceivedChunks() { |
||||
R: |
||||
for req := range d.receiveC { |
||||
processReceivedChunksCount.Inc(1) |
||||
|
||||
// check whether the chunk is already stored locally
|
||||
chunk, err := d.db.Get(req.Addr) |
||||
if err == nil { |
||||
continue R |
||||
} |
||||
if err != storage.ErrFetching { |
||||
log.Error("processReceivedChunks db error", "addr", req.Addr, "err", err, "chunk", chunk) |
||||
continue R |
||||
} |
||||
select { |
||||
case <-chunk.ReqC: |
||||
log.Error("someone else delivered?", "hash", chunk.Addr.Hex()) |
||||
continue R |
||||
default: |
||||
} |
||||
chunk.SData = req.SData |
||||
d.db.Put(chunk) |
||||
|
||||
go func(req *ChunkDeliveryMsg) { |
||||
err := chunk.WaitToStore() |
||||
if err == storage.ErrChunkInvalid { |
||||
req.peer.Drop(err) |
||||
} |
||||
}(req) |
||||
} |
||||
} |
||||
|
||||
// RequestFromPeers sends a chunk retrieve request to the closest connected peer, skipping the peers given in peersToSkip.
|
||||
func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { |
||||
var success bool |
||||
var err error |
||||
requestFromPeersCount.Inc(1) |
||||
d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool { |
||||
spId := p.(network.Peer).ID() |
||||
for _, p := range peersToSkip { |
||||
if p == spId { |
||||
log.Trace("Delivery.RequestFromPeers: skip peer", "peer", spId) |
||||
return true |
||||
} |
||||
} |
||||
sp := d.getPeer(spId) |
||||
if sp == nil { |
||||
log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId) |
||||
return true |
||||
} |
||||
// TODO: skip light nodes that do not accept retrieve requests
|
||||
err = sp.SendPriority(&RetrieveRequestMsg{ |
||||
Addr: hash, |
||||
SkipCheck: skipCheck, |
||||
}, Top) |
||||
if err != nil { |
||||
return true |
||||
} |
||||
requestFromPeersEachCount.Inc(1) |
||||
success = true |
||||
return false |
||||
}) |
||||
if success { |
||||
return nil |
||||
} |
||||
return errors.New("no peer found") |
||||
} |
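// exampleRetrieveWiring is an illustrative sketch (hypothetical helper, for
// documentation only) of how RequestFromPeers is typically plugged into a
// NetStore as its retrieve function, mirroring the setup used by the tests in
// delivery_test.go: any chunk missing from the local store triggers a
// retrieve request to the closest connected peer.
func exampleRetrieveWiring(localStore *storage.LocalStore, delivery *Delivery, skipCheck bool) {
	retrieveFunc := func(chunk *storage.Chunk) error {
		return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
	}
	netStore := storage.NewNetStore(localStore, retrieveFunc)
	// a FileStore on top of the NetStore reads files through the network
	_ = storage.NewFileStore(netStore, storage.NewFileStoreParams())
}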
@ -0,0 +1,699 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
crand "crypto/rand" |
||||
"fmt" |
||||
"io" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
p2ptest "github.com/ethereum/go-ethereum/p2p/testing" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
func TestStreamerRetrieveRequest(t *testing.T) { |
||||
tester, streamer, _, teardown, err := newStreamerTester(t) |
||||
defer teardown() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
peerID := tester.IDs[0] |
||||
|
||||
streamer.delivery.RequestFromPeers(hash0[:], true) |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Label: "RetrieveRequestMsg", |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 5, |
||||
Msg: &RetrieveRequestMsg{ |
||||
Addr: hash0[:], |
||||
SkipCheck: true, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatalf("Expected no error, got %v", err) |
||||
} |
||||
} |
||||
|
||||
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) { |
||||
tester, streamer, _, teardown, err := newStreamerTester(t) |
||||
defer teardown() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
peerID := tester.IDs[0] |
||||
|
||||
chunk := storage.NewChunk(storage.Address(hash0[:]), nil) |
||||
|
||||
peer := streamer.getPeer(peerID) |
||||
|
||||
peer.handleSubscribeMsg(&SubscribeMsg{ |
||||
Stream: NewStream(swarmChunkServerStreamName, "", false), |
||||
History: nil, |
||||
Priority: Top, |
||||
}) |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Label: "RetrieveRequestMsg", |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 5, |
||||
Msg: &RetrieveRequestMsg{ |
||||
Addr: chunk.Addr[:], |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &OfferedHashesMsg{ |
||||
HandoverProof: nil, |
||||
Hashes: nil, |
||||
From: 0, |
||||
To: 0, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
expectedError := `exchange #0 "RetrieveRequestMsg": timed out` |
||||
if err == nil || err.Error() != expectedError { |
||||
t.Fatalf("Expected error %v, got %v", expectedError, err) |
||||
} |
||||
} |
||||
|
||||
// upstream request server receives a retrieve request and responds with
|
||||
// offered hashes, or with delivery if SkipCheck is set to true
|
||||
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { |
||||
tester, streamer, localStore, teardown, err := newStreamerTester(t) |
||||
defer teardown() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
peerID := tester.IDs[0] |
||||
peer := streamer.getPeer(peerID) |
||||
|
||||
stream := NewStream(swarmChunkServerStreamName, "", false) |
||||
|
||||
peer.handleSubscribeMsg(&SubscribeMsg{ |
||||
Stream: stream, |
||||
History: nil, |
||||
Priority: Top, |
||||
}) |
||||
|
||||
hash := storage.Address(hash0[:]) |
||||
chunk := storage.NewChunk(hash, nil) |
||||
chunk.SData = hash |
||||
localStore.Put(chunk) |
||||
chunk.WaitToStore() |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Label: "RetrieveRequestMsg", |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 5, |
||||
Msg: &RetrieveRequestMsg{ |
||||
Addr: hash, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 1, |
||||
Msg: &OfferedHashesMsg{ |
||||
HandoverProof: &HandoverProof{ |
||||
Handover: &Handover{}, |
||||
}, |
||||
Hashes: hash, |
||||
From: 0, |
||||
// TODO: why is this 32???
|
||||
To: 32, |
||||
Stream: stream, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
hash = storage.Address(hash1[:]) |
||||
chunk = storage.NewChunk(hash, nil) |
||||
chunk.SData = hash1[:] |
||||
localStore.Put(chunk) |
||||
chunk.WaitToStore() |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Label: "RetrieveRequestMsg", |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 5, |
||||
Msg: &RetrieveRequestMsg{ |
||||
Addr: hash, |
||||
SkipCheck: true, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 6, |
||||
Msg: &ChunkDeliveryMsg{ |
||||
Addr: hash, |
||||
SData: hash, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
} |
||||
|
||||
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { |
||||
tester, streamer, localStore, teardown, err := newStreamerTester(t) |
||||
defer teardown() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) { |
||||
return &testClient{ |
||||
t: t, |
||||
}, nil |
||||
}) |
||||
|
||||
peerID := tester.IDs[0] |
||||
|
||||
stream := NewStream("foo", "", true) |
||||
err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top) |
||||
if err != nil { |
||||
t.Fatalf("Expected no error, got %v", err) |
||||
} |
||||
|
||||
chunkKey := hash0[:] |
||||
chunkData := hash1[:] |
||||
chunk, created := localStore.GetOrCreateRequest(chunkKey) |
||||
|
||||
if !created { |
||||
t.Fatal("chunk already exists") |
||||
} |
||||
select { |
||||
case <-chunk.ReqC: |
||||
t.Fatal("chunk is already received") |
||||
default: |
||||
} |
||||
|
||||
err = tester.TestExchanges(p2ptest.Exchange{ |
||||
Label: "Subscribe message", |
||||
Expects: []p2ptest.Expect{ |
||||
{ |
||||
Code: 4, |
||||
Msg: &SubscribeMsg{ |
||||
Stream: stream, |
||||
History: NewRange(5, 8), |
||||
Priority: Top, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}, |
||||
p2ptest.Exchange{ |
||||
Label: "ChunkDeliveryRequest message", |
||||
Triggers: []p2ptest.Trigger{ |
||||
{ |
||||
Code: 6, |
||||
Msg: &ChunkDeliveryMsg{ |
||||
Addr: chunkKey, |
||||
SData: chunkData, |
||||
}, |
||||
Peer: peerID, |
||||
}, |
||||
}, |
||||
}) |
||||
|
||||
if err != nil { |
||||
t.Fatalf("Expected no error, got %v", err) |
||||
} |
||||
|
||||
timeout := time.NewTimer(1 * time.Second) |
||||
|
||||
select { |
||||
case <-timeout.C: |
||||
t.Fatal("timeout receiving chunk") |
||||
case <-chunk.ReqC: |
||||
} |
||||
|
||||
storedChunk, err := localStore.Get(chunkKey) |
||||
if err != nil { |
||||
t.Fatalf("Expected no error, got %v", err) |
||||
} |
||||
|
||||
if !bytes.Equal(storedChunk.SData, chunkData) { |
||||
t.Fatal("Retrieved chunk has different data than original") |
||||
} |
||||
|
||||
} |
||||
|
||||
func TestDeliveryFromNodes(t *testing.T) { |
||||
testDeliveryFromNodes(t, 2, 1, dataChunkCount, true) |
||||
testDeliveryFromNodes(t, 2, 1, dataChunkCount, false) |
||||
testDeliveryFromNodes(t, 4, 1, dataChunkCount, true) |
||||
testDeliveryFromNodes(t, 4, 1, dataChunkCount, false) |
||||
testDeliveryFromNodes(t, 8, 1, dataChunkCount, true) |
||||
testDeliveryFromNodes(t, 8, 1, dataChunkCount, false) |
||||
testDeliveryFromNodes(t, 16, 1, dataChunkCount, true) |
||||
testDeliveryFromNodes(t, 16, 1, dataChunkCount, false) |
||||
} |
||||
|
||||
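// testDeliveryFromNodes runs a simulation with the given number of nodes and
// connections, distributes the chunks of a random file over the stores of all
// nodes except the pivot (node 0), and checks that the pivot node can
// retrieve the complete file through retrieve requests for the given
// skipCheck setting.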
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) { |
||||
defaultSkipCheck = skipCheck |
||||
toAddr = network.NewAddrFromNodeID |
||||
createStoreFunc = createTestLocalStorageFromSim |
||||
conf := &streamTesting.RunConfig{ |
||||
Adapter: *adapter, |
||||
NodeCount: nodes, |
||||
ConnLevel: conns, |
||||
ToAddr: toAddr, |
||||
Services: services, |
||||
EnableMsgEvents: false, |
||||
} |
||||
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf) |
||||
var rpcSubscriptionsWg sync.WaitGroup |
||||
defer func() { |
||||
rpcSubscriptionsWg.Wait() |
||||
teardown() |
||||
}() |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
stores = make(map[discover.NodeID]storage.ChunkStore) |
||||
for i, id := range sim.IDs { |
||||
stores[id] = sim.Stores[i] |
||||
} |
||||
registries = make(map[discover.NodeID]*TestRegistry) |
||||
deliveries = make(map[discover.NodeID]*Delivery) |
||||
peerCount = func(id discover.NodeID) int { |
||||
if sim.IDs[0] == id || sim.IDs[nodes-1] == id { |
||||
return 1 |
||||
} |
||||
return 2 |
||||
} |
||||
|
||||
// here we distribute chunks of a random file into the Stores of nodes 1 to nodes-1
|
||||
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams()) |
||||
size := chunkCount * chunkSize |
||||
fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) |
||||
// wait until all chunks stored
|
||||
wait() |
||||
if err != nil { |
||||
t.Fatal(err.Error()) |
||||
} |
||||
errc := make(chan error, 1) |
||||
waitPeerErrC = make(chan error) |
||||
quitC := make(chan struct{}) |
||||
defer close(quitC) |
||||
|
||||
action := func(ctx context.Context) error { |
||||
// each node Subscribes to each other's swarmChunkServerStreamName
|
||||
// need to wait till an asynchronous process registers the peers in streamer.peers
|
||||
// that is used by Subscribe
|
||||
// using a global err channel shared between the action and the node service
|
||||
i := 0 |
||||
for err := range waitPeerErrC { |
||||
if err != nil { |
||||
return fmt.Errorf("error waiting for peers: %s", err) |
||||
} |
||||
i++ |
||||
if i == nodes { |
||||
break |
||||
} |
||||
} |
||||
|
||||
// each node subscribes to the upstream swarm chunk server stream
|
||||
// which responds to chunk retrieve requests; all nodes but the last one in the chain subscribe
|
||||
for j := 0; j < nodes-1; j++ { |
||||
id := sim.IDs[j] |
||||
err := sim.CallClient(id, func(client *rpc.Client) error { |
||||
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
rpcSubscriptionsWg.Add(1) |
||||
go func() { |
||||
<-doneC |
||||
rpcSubscriptionsWg.Done() |
||||
}() |
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second) |
||||
defer cancel() |
||||
sid := sim.IDs[j+1] |
||||
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top) |
||||
}) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
// create a retriever FileStore for the pivot node
|
||||
delivery := deliveries[sim.IDs[0]] |
||||
retrieveFunc := func(chunk *storage.Chunk) error { |
||||
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) |
||||
} |
||||
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) |
||||
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) |
||||
|
||||
go func() { |
||||
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
|
||||
// we must wait for the peer connections to have started before requesting
|
||||
n, err := readAll(fileStore, fileHash) |
||||
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err) |
||||
if err != nil { |
||||
errc <- fmt.Errorf("requesting chunks action error: %v", err) |
||||
} |
||||
}() |
||||
return nil |
||||
} |
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) { |
||||
select { |
||||
case err := <-errc: |
||||
return false, err |
||||
case <-ctx.Done(): |
||||
return false, ctx.Err() |
||||
default: |
||||
} |
||||
var total int64 |
||||
err := sim.CallClient(id, func(client *rpc.Client) error { |
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) |
||||
defer cancel() |
||||
return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash)) |
||||
}) |
||||
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err)) |
||||
if err != nil || total != int64(size) { |
||||
return false, nil |
||||
} |
||||
return true, nil |
||||
} |
||||
|
||||
conf.Step = &simulations.Step{ |
||||
Action: action, |
||||
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), |
||||
// we are only testing the pivot node (net.Nodes[0])
|
||||
Expect: &simulations.Expectation{ |
||||
Nodes: sim.IDs[0:1], |
||||
Check: check, |
||||
}, |
||||
} |
||||
startedAt := time.Now() |
||||
timeout := 300 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
result, err := sim.Run(ctx, conf) |
||||
finishedAt := time.Now() |
||||
if err != nil { |
||||
t.Fatalf("Setting up simulation failed: %v", err) |
||||
} |
||||
if result.Error != nil { |
||||
t.Fatalf("Simulation failed: %s", result.Error) |
||||
} |
||||
streamTesting.CheckResult(t, result, startedAt, finishedAt) |
||||
} |
||||
|
||||
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) { |
||||
for chunks := 32; chunks <= 128; chunks *= 2 { |
||||
for i := 2; i < 32; i *= 2 { |
||||
b.Run( |
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks), |
||||
func(b *testing.B) { |
||||
benchmarkDeliveryFromNodes(b, i, 1, chunks, true) |
||||
}, |
||||
) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) { |
||||
for chunks := 32; chunks <= 128; chunks *= 2 { |
||||
for i := 2; i < 32; i *= 2 { |
||||
b.Run( |
||||
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks), |
||||
func(b *testing.B) { |
||||
benchmarkDeliveryFromNodes(b, i, 1, chunks, false) |
||||
}, |
||||
) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) { |
||||
defaultSkipCheck = skipCheck |
||||
toAddr = network.NewAddrFromNodeID |
||||
createStoreFunc = createTestLocalStorageFromSim |
||||
registries = make(map[discover.NodeID]*TestRegistry) |
||||
|
||||
timeout := 300 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
|
||||
conf := &streamTesting.RunConfig{ |
||||
Adapter: *adapter, |
||||
NodeCount: nodes, |
||||
ConnLevel: conns, |
||||
ToAddr: toAddr, |
||||
Services: services, |
||||
EnableMsgEvents: false, |
||||
} |
||||
sim, teardown, err := streamTesting.NewSimulation(conf) |
||||
var rpcSubscriptionsWg sync.WaitGroup |
||||
defer func() { |
||||
rpcSubscriptionsWg.Wait() |
||||
teardown() |
||||
}() |
||||
if err != nil { |
||||
b.Fatal(err.Error()) |
||||
} |
||||
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore) |
||||
deliveries = make(map[discover.NodeID]*Delivery) |
||||
for i, id := range sim.IDs { |
||||
stores[id] = sim.Stores[i] |
||||
} |
||||
peerCount = func(id discover.NodeID) int { |
||||
if sim.IDs[0] == id || sim.IDs[nodes-1] == id { |
||||
return 1 |
||||
} |
||||
return 2 |
||||
} |
||||
// wait channel for all nodes' peer connections to be set up
|
||||
waitPeerErrC = make(chan error) |
||||
|
||||
// create a FileStore for the last node in the chain, which we are going to write to
|
||||
remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams()) |
||||
|
||||
// channels to signal that simulation initialisation (the action call) is complete,
|
||||
// or that a node has disconnected
|
||||
disconnectC := make(chan error) |
||||
quitC := make(chan struct{}) |
||||
|
||||
initC := make(chan error) |
||||
|
||||
action := func(ctx context.Context) error { |
||||
// each node Subscribes to each other's swarmChunkServerStreamName
|
||||
// need to wait till an asynchronous process registers the peers in streamer.peers
|
||||
// that is used by Subscribe
|
||||
// waitPeerErrC is a global err channel shared between the action and the node service
|
||||
i := 0 |
||||
for err := range waitPeerErrC { |
||||
if err != nil { |
||||
return fmt.Errorf("error waiting for peers: %s", err) |
||||
} |
||||
i++ |
||||
if i == nodes { |
||||
break |
||||
} |
||||
} |
||||
var err error |
||||
// each node except the last one subscribes to the upstream swarm chunk server stream
|
||||
// which responds to chunk retrieve requests
|
||||
for j := 0; j < nodes-1; j++ { |
||||
id := sim.IDs[j] |
||||
err = sim.CallClient(id, func(client *rpc.Client) error { |
||||
doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
rpcSubscriptionsWg.Add(1) |
||||
go func() { |
||||
<-doneC |
||||
rpcSubscriptionsWg.Done() |
||||
}() |
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second) |
||||
defer cancel() |
||||
sid := sim.IDs[j+1] // the upstream peer's id
|
||||
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top) |
||||
}) |
||||
if err != nil { |
||||
break |
||||
} |
||||
} |
||||
initC <- err |
||||
return nil |
||||
} |
||||
|
||||
// the check function is only triggered when the benchmark finishes
|
||||
trigger := make(chan discover.NodeID) |
||||
check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) { |
||||
return true, nil |
||||
} |
||||
|
||||
conf.Step = &simulations.Step{ |
||||
Action: action, |
||||
Trigger: trigger, |
||||
// we are only testing the pivot node (net.Nodes[0])
|
||||
Expect: &simulations.Expectation{ |
||||
Nodes: sim.IDs[0:1], |
||||
Check: check, |
||||
}, |
||||
} |
||||
|
||||
// run the simulation in the background
|
||||
errc := make(chan error) |
||||
go func() { |
||||
_, err := sim.Run(ctx, conf) |
||||
close(quitC) |
||||
errc <- err |
||||
}() |
||||
|
||||
// wait for simulation action to complete stream subscriptions
|
||||
err = <-initC |
||||
if err != nil { |
||||
b.Fatalf("simulation failed to initialise. expected no error. got %v", err) |
||||
} |
||||
|
||||
// create a retriever FileStore for the pivot node
|
||||
// by now deliveries are set for each node by the streamer service
|
||||
delivery := deliveries[sim.IDs[0]] |
||||
retrieveFunc := func(chunk *storage.Chunk) error { |
||||
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) |
||||
} |
||||
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) |
||||
|
||||
// benchmark loop
|
||||
b.ResetTimer() |
||||
b.StopTimer() |
||||
Loop: |
||||
for i := 0; i < b.N; i++ { |
||||
// uploading chunkCount random chunks to the last node
|
||||
hashes := make([]storage.Address, chunkCount) |
||||
for i := 0; i < chunkCount; i++ { |
||||
// create real chunks of the actual chunk size
|
||||
hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false) |
||||
// wait until all chunks stored
|
||||
wait() |
||||
if err != nil { |
||||
b.Fatalf("expected no error. got %v", err) |
||||
} |
||||
// collect the hashes
|
||||
hashes[i] = hash |
||||
} |
||||
// now benchmark the actual retrieval
|
||||
// netStore.Get is called for each hash in a goroutine and errors are collected
|
||||
b.StartTimer() |
||||
errs := make(chan error) |
||||
for _, hash := range hashes { |
||||
go func(h storage.Address) { |
||||
_, err := netStore.Get(h) |
||||
log.Warn("test check netstore get", "hash", h, "err", err) |
||||
errs <- err |
||||
}(hash) |
||||
} |
||||
// count and report retrieval errors
|
||||
// if there are misses then chunk timeout is too low for the distance and volume (?)
|
||||
var total, misses int |
||||
for err := range errs { |
||||
if err != nil { |
||||
log.Warn(err.Error()) |
||||
misses++ |
||||
} |
||||
total++ |
||||
if total == chunkCount { |
||||
break |
||||
} |
||||
} |
||||
b.StopTimer() |
||||
|
||||
select { |
||||
case err = <-disconnectC: |
||||
if err != nil { |
||||
break Loop |
||||
} |
||||
default: |
||||
} |
||||
|
||||
if misses > 0 { |
||||
err = fmt.Errorf("%v chunk not found out of %v", misses, total) |
||||
break Loop |
||||
} |
||||
} |
||||
|
||||
select { |
||||
case <-quitC: |
||||
case trigger <- sim.IDs[0]: |
||||
} |
||||
if err == nil { |
||||
err = <-errc |
||||
} else { |
||||
if e := <-errc; e != nil { |
||||
b.Errorf("sim.Run function error: %v", e) |
||||
} |
||||
} |
||||
|
||||
// benchmark over, trigger the check function to conclude the simulation
|
||||
if err != nil { |
||||
b.Fatalf("expected no error. got %v", err) |
||||
} |
||||
} |
||||
|
||||
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) { |
||||
return stores[id], nil |
||||
} |
@ -0,0 +1,42 @@ |
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package intervals

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/state"
)

// TestDBStore tests basic functionality of DBStore.
func TestDBStore(t *testing.T) {
	dir, err := ioutil.TempDir("", "intervals_test_db_store")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	store, err := state.NewDBStore(dir)
	if err != nil {
		t.Fatal(err)
	}
	defer store.Close()

	testStore(t, store)
}
@ -0,0 +1,206 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"strconv" |
||||
"sync" |
||||
) |
||||
|
||||
// Intervals stores a list of intervals. Its purpose is to provide
|
||||
// methods to add new intervals and retrieve missing intervals that
|
||||
// need to be added.
|
||||
// It may be used in synchronization of streaming data to persist
|
||||
// retrieved data ranges between sessions.
|
||||
type Intervals struct { |
||||
start uint64 |
||||
ranges [][2]uint64 |
||||
mu sync.RWMutex |
||||
} |
||||
|
||||
// NewIntervals creates a new instance of Intervals.
|
||||
// Start argument limits the lower bound of intervals.
|
||||
// No range below the start bound will be added by the Add method or
|
||||
// returned by Next method. This limit may be used for
|
||||
// tracking "live" synchronization, where the sync session
|
||||
// starts from a specific value, and if "live" sync intervals
|
||||
// need to be merged with historical ones, it can be safely done.
|
||||
func NewIntervals(start uint64) *Intervals { |
||||
return &Intervals{ |
||||
start: start, |
||||
} |
||||
} |
||||
|
||||
// Add adds a new range to intervals. Range start and end values
|
||||
// are both inclusive.
|
||||
func (i *Intervals) Add(start, end uint64) { |
||||
i.mu.Lock() |
||||
defer i.mu.Unlock() |
||||
|
||||
i.add(start, end) |
||||
} |
||||
|
||||
func (i *Intervals) add(start, end uint64) { |
||||
if start < i.start { |
||||
start = i.start |
||||
} |
||||
if end < i.start { |
||||
return |
||||
} |
||||
minStartJ := -1 |
||||
maxEndJ := -1 |
||||
j := 0 |
||||
for ; j < len(i.ranges); j++ { |
||||
if minStartJ < 0 { |
||||
if (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) || (start <= i.ranges[j][1]+1 && end+1 >= i.ranges[j][1]) { |
||||
if i.ranges[j][0] < start { |
||||
start = i.ranges[j][0] |
||||
} |
||||
minStartJ = j |
||||
} |
||||
} |
||||
if (start <= i.ranges[j][1] && end+1 >= i.ranges[j][1]) || (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) { |
||||
if i.ranges[j][1] > end { |
||||
end = i.ranges[j][1] |
||||
} |
||||
maxEndJ = j |
||||
} |
||||
if end+1 <= i.ranges[j][0] { |
||||
break |
||||
} |
||||
} |
||||
if minStartJ < 0 && maxEndJ < 0 { |
||||
i.ranges = append(i.ranges[:j], append([][2]uint64{{start, end}}, i.ranges[j:]...)...) |
||||
return |
||||
} |
||||
if minStartJ >= 0 { |
||||
i.ranges[minStartJ][0] = start |
||||
} |
||||
if maxEndJ >= 0 { |
||||
i.ranges[maxEndJ][1] = end |
||||
} |
||||
if minStartJ >= 0 && maxEndJ >= 0 && minStartJ != maxEndJ { |
||||
i.ranges[maxEndJ][0] = start |
||||
i.ranges = append(i.ranges[:minStartJ], i.ranges[maxEndJ:]...) |
||||
} |
||||
} |
||||
|
||||
// Merge adds all the intervals from the m Intervals to the current one.
|
||||
func (i *Intervals) Merge(m *Intervals) { |
||||
m.mu.RLock() |
||||
defer m.mu.RUnlock() |
||||
i.mu.Lock() |
||||
defer i.mu.Unlock() |
||||
|
||||
for _, r := range m.ranges { |
||||
i.add(r[0], r[1]) |
||||
} |
||||
} |
||||
|
||||
// Next returns the first range interval that is not fulfilled. Returned
|
||||
// start and end values are both inclusive, meaning that the whole range
|
||||
// including start and end needs to be added in order to fill the gap
|
||||
// in intervals.
|
||||
// Returned value for end is 0 if the next interval is after the whole
|
||||
// range that is stored in Intervals. Zero end value represents no limit
|
||||
// on the next interval length.
|
||||
func (i *Intervals) Next() (start, end uint64) { |
||||
i.mu.RLock() |
||||
defer i.mu.RUnlock() |
||||
|
||||
l := len(i.ranges) |
||||
if l == 0 { |
||||
return i.start, 0 |
||||
} |
||||
if i.ranges[0][0] != i.start { |
||||
return i.start, i.ranges[0][0] - 1 |
||||
} |
||||
if l == 1 { |
||||
return i.ranges[0][1] + 1, 0 |
||||
} |
||||
return i.ranges[0][1] + 1, i.ranges[1][0] - 1 |
||||
} |
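// nextGapExample is an illustrative sketch (hypothetical, for documentation
// only) of how Add and Next interact: with the ranges [0,5] and [15,20]
// stored, the first unfulfilled gap is [6,14]; once that gap is added the
// ranges coalesce and Next reports an open-ended interval starting at 21.
func nextGapExample() {
	i := NewIntervals(0)
	i.Add(0, 5)
	i.Add(15, 20)
	start, end := i.Next() // start == 6, end == 14
	i.Add(start, end)
	start, end = i.Next() // start == 21, end == 0 (no upper limit)
	_, _ = start, end
}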
||||
|
||||
// Last returns the value that is at the end of the last interval.
|
||||
func (i *Intervals) Last() (end uint64) { |
||||
i.mu.RLock() |
||||
defer i.mu.RUnlock() |
||||
|
||||
l := len(i.ranges) |
||||
if l == 0 { |
||||
return 0 |
||||
} |
||||
return i.ranges[l-1][1] |
||||
} |
||||
|
||||
// String returns a descriptive representation of range intervals
|
||||
// in [] notation, as a list of two element vectors.
|
||||
func (i *Intervals) String() string { |
||||
return fmt.Sprint(i.ranges) |
||||
} |
||||
|
||||
// MarshalBinary encodes Intervals parameters into a semicolon separated list.
|
||||
// The first element in the list is base36-encoded start value. The following
|
||||
// elements are two base36-encoded value ranges separated by comma.
|
||||
func (i *Intervals) MarshalBinary() (data []byte, err error) { |
||||
d := make([][]byte, len(i.ranges)+1) |
||||
d[0] = []byte(strconv.FormatUint(i.start, 36)) |
||||
for j := range i.ranges { |
||||
r := i.ranges[j] |
||||
d[j+1] = []byte(strconv.FormatUint(r[0], 36) + "," + strconv.FormatUint(r[1], 36)) |
||||
} |
||||
return bytes.Join(d, []byte(";")), nil |
||||
} |
||||
|
||||
// UnmarshalBinary decodes data according to the Intervals.MarshalBinary format.
|
||||
func (i *Intervals) UnmarshalBinary(data []byte) (err error) { |
||||
d := bytes.Split(data, []byte(";")) |
||||
l := len(d) |
||||
if l == 0 { |
||||
return nil |
||||
} |
||||
if l >= 1 { |
||||
i.start, err = strconv.ParseUint(string(d[0]), 36, 64) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
} |
||||
if l == 1 { |
||||
return nil |
||||
} |
||||
|
||||
i.ranges = make([][2]uint64, 0, l-1) |
||||
for j := 1; j < l; j++ { |
||||
r := bytes.SplitN(d[j], []byte(","), 2) |
||||
if len(r) < 2 { |
||||
return fmt.Errorf("range %d has less then 2 elements", j) |
||||
} |
||||
start, err := strconv.ParseUint(string(r[0]), 36, 64) |
||||
if err != nil { |
||||
return fmt.Errorf("parsing the first element in range %d: %v", j, err) |
||||
} |
||||
end, err := strconv.ParseUint(string(r[1]), 36, 64) |
||||
if err != nil { |
||||
return fmt.Errorf("parsing the second element in range %d: %v", j, err) |
||||
} |
||||
i.ranges = append(i.ranges, [2]uint64{start, end}) |
||||
} |
||||
|
||||
return nil |
||||
} |
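// wireFormatExample is an illustrative sketch (hypothetical, for documentation
// only) of the encoding produced by MarshalBinary: the base36 start value,
// then ";"-separated "from,to" pairs, also base36. With start 0 and the single
// range [10, 20] the encoding is "0;a,k", and it round-trips through
// UnmarshalBinary.
func wireFormatExample() error {
	i := NewIntervals(0)
	i.Add(10, 20)
	data, err := i.MarshalBinary() // data == []byte("0;a,k")
	if err != nil {
		return err
	}
	j := &Intervals{}
	return j.UnmarshalBinary(data) // j now holds the same start and range
}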
@ -0,0 +1,395 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals |
||||
|
||||
import "testing" |
||||
|
||||
// Test tests Intervals methods Add, Next and Last for various
|
||||
// initial states.
|
||||
func Test(t *testing.T) { |
||||
for i, tc := range []struct { |
||||
startLimit uint64 |
||||
initial [][2]uint64 |
||||
start uint64 |
||||
end uint64 |
||||
expected string |
||||
nextStart uint64 |
||||
nextEnd uint64 |
||||
last uint64 |
||||
}{ |
||||
{ |
||||
initial: nil, |
||||
start: 0, |
||||
end: 0, |
||||
expected: "[[0 0]]", |
||||
nextStart: 1, |
||||
nextEnd: 0, |
||||
last: 0, |
||||
}, |
||||
{ |
||||
initial: nil, |
||||
start: 0, |
||||
end: 10, |
||||
expected: "[[0 10]]", |
||||
nextStart: 11, |
||||
nextEnd: 0, |
||||
last: 10, |
||||
}, |
||||
{ |
||||
initial: nil, |
||||
start: 5, |
||||
end: 15, |
||||
expected: "[[5 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 4, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 0}}, |
||||
start: 0, |
||||
end: 0, |
||||
expected: "[[0 0]]", |
||||
nextStart: 1, |
||||
nextEnd: 0, |
||||
last: 0, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 0}}, |
||||
start: 5, |
||||
end: 15, |
||||
expected: "[[0 0] [5 15]]", |
||||
nextStart: 1, |
||||
nextEnd: 4, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 5, |
||||
end: 15, |
||||
expected: "[[5 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 4, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 5, |
||||
end: 20, |
||||
expected: "[[5 20]]", |
||||
nextStart: 0, |
||||
nextEnd: 4, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 10, |
||||
end: 20, |
||||
expected: "[[5 20]]", |
||||
nextStart: 0, |
||||
nextEnd: 4, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 0, |
||||
end: 20, |
||||
expected: "[[0 20]]", |
||||
nextStart: 21, |
||||
nextEnd: 0, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 2, |
||||
end: 10, |
||||
expected: "[[2 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 1, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 2, |
||||
end: 4, |
||||
expected: "[[2 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 1, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 2, |
||||
end: 5, |
||||
expected: "[[2 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 1, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 2, |
||||
end: 3, |
||||
expected: "[[2 3] [5 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 1, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{5, 15}}, |
||||
start: 2, |
||||
end: 4, |
||||
expected: "[[2 15]]", |
||||
nextStart: 0, |
||||
nextEnd: 1, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 1}, {5, 15}}, |
||||
start: 2, |
||||
end: 4, |
||||
expected: "[[0 15]]", |
||||
nextStart: 16, |
||||
nextEnd: 0, |
||||
last: 15, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 2, |
||||
end: 10, |
||||
expected: "[[0 10] [15 20]]", |
||||
nextStart: 11, |
||||
nextEnd: 14, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 8, |
||||
end: 18, |
||||
expected: "[[0 5] [8 20]]", |
||||
nextStart: 6, |
||||
nextEnd: 7, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 2, |
||||
end: 17, |
||||
expected: "[[0 20]]", |
||||
nextStart: 21, |
||||
nextEnd: 0, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 2, |
||||
end: 25, |
||||
expected: "[[0 25]]", |
||||
nextStart: 26, |
||||
nextEnd: 0, |
||||
last: 25, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 5, |
||||
end: 14, |
||||
expected: "[[0 20]]", |
||||
nextStart: 21, |
||||
nextEnd: 0, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}}, |
||||
start: 6, |
||||
end: 14, |
||||
expected: "[[0 20]]", |
||||
nextStart: 21, |
||||
nextEnd: 0, |
||||
last: 20, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}}, |
||||
start: 6, |
||||
end: 29, |
||||
expected: "[[0 40]]", |
||||
nextStart: 41, |
||||
nextEnd: 0, |
||||
last: 40, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}}, |
||||
start: 3, |
||||
end: 55, |
||||
expected: "[[0 60]]", |
||||
nextStart: 61, |
||||
nextEnd: 0, |
||||
last: 60, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}}, |
||||
start: 21, |
||||
end: 49, |
||||
expected: "[[0 5] [15 60]]", |
||||
nextStart: 6, |
||||
nextEnd: 14, |
||||
last: 60, |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}}, |
||||
start: 0, |
||||
end: 100, |
||||
expected: "[[0 100]]", |
||||
nextStart: 101, |
||||
nextEnd: 0, |
||||
last: 100, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 0, |
||||
end: 0, |
||||
expected: "[]", |
||||
nextStart: 100, |
||||
nextEnd: 0, |
||||
last: 0, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 20, |
||||
end: 30, |
||||
expected: "[]", |
||||
nextStart: 100, |
||||
nextEnd: 0, |
||||
last: 0, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 50, |
||||
end: 100, |
||||
expected: "[[100 100]]", |
||||
nextStart: 101, |
||||
nextEnd: 0, |
||||
last: 100, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 50, |
||||
end: 110, |
||||
expected: "[[100 110]]", |
||||
nextStart: 111, |
||||
nextEnd: 0, |
||||
last: 110, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 120, |
||||
end: 130, |
||||
expected: "[[120 130]]", |
||||
nextStart: 100, |
||||
nextEnd: 119, |
||||
last: 130, |
||||
}, |
||||
{ |
||||
startLimit: 100, |
||||
initial: nil, |
||||
start: 120, |
||||
end: 130, |
||||
expected: "[[120 130]]", |
||||
nextStart: 100, |
||||
nextEnd: 119, |
||||
last: 130, |
||||
}, |
||||
} { |
||||
intervals := NewIntervals(tc.startLimit) |
||||
intervals.ranges = tc.initial |
||||
intervals.Add(tc.start, tc.end) |
||||
got := intervals.String() |
||||
if got != tc.expected { |
||||
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got) |
||||
} |
||||
nextStart, nextEnd := intervals.Next() |
||||
if nextStart != tc.nextStart { |
||||
t.Errorf("interval #%d, expected next start %d, got %d", i, tc.nextStart, nextStart) |
||||
} |
||||
if nextEnd != tc.nextEnd { |
||||
t.Errorf("interval #%d, expected next end %d, got %d", i, tc.nextEnd, nextEnd) |
||||
} |
||||
last := intervals.Last() |
||||
if last != tc.last { |
||||
t.Errorf("interval #%d, expected last %d, got %d", i, tc.last, last) |
||||
} |
||||
} |
||||
} |
||||
|
||||
func TestMerge(t *testing.T) { |
||||
for i, tc := range []struct { |
||||
initial [][2]uint64 |
||||
merge [][2]uint64 |
||||
expected string |
||||
}{ |
||||
{ |
||||
initial: nil, |
||||
merge: nil, |
||||
expected: "[]", |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{10, 20}}, |
||||
merge: nil, |
||||
expected: "[[10 20]]", |
||||
}, |
||||
{ |
||||
initial: nil, |
||||
merge: [][2]uint64{{15, 25}}, |
||||
expected: "[[15 25]]", |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 100}}, |
||||
merge: [][2]uint64{{150, 250}}, |
||||
expected: "[[0 100] [150 250]]", |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 100}}, |
||||
merge: [][2]uint64{{101, 250}}, |
||||
expected: "[[0 250]]", |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 10}, {30, 40}}, |
||||
merge: [][2]uint64{{20, 25}, {41, 50}}, |
||||
expected: "[[0 10] [20 25] [30 50]]", |
||||
}, |
||||
{ |
||||
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}}, |
||||
merge: [][2]uint64{{6, 25}}, |
||||
expected: "[[0 25] [30 40] [50 60]]", |
||||
}, |
||||
} { |
||||
intervals := NewIntervals(0) |
||||
intervals.ranges = tc.initial |
||||
m := NewIntervals(0) |
||||
m.ranges = tc.merge |
||||
|
||||
intervals.Merge(m) |
||||
|
||||
got := intervals.String() |
||||
if got != tc.expected { |
||||
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got) |
||||
} |
||||
} |
||||
} |
@ -0,0 +1,80 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package intervals |
||||
|
||||
import ( |
||||
"errors" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
) |
||||
|
||||
var ErrNotFound = errors.New("not found") |
||||
|
||||
// TestInmemoryStore tests basic functionality of InmemoryStore.
|
||||
func TestInmemoryStore(t *testing.T) { |
||||
testStore(t, state.NewInmemoryStore()) |
||||
} |
||||
|
||||
// testStore is a helper function to test various Store implementations.
|
||||
func testStore(t *testing.T, s state.Store) { |
||||
key1 := "key1" |
||||
i1 := NewIntervals(0) |
||||
i1.Add(10, 20) |
||||
if err := s.Put(key1, i1); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
i := &Intervals{} |
||||
err := s.Get(key1, i) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if i.String() != i1.String() { |
||||
t.Errorf("expected interval %s, got %s", i1, i) |
||||
} |
||||
|
||||
key2 := "key2" |
||||
i2 := NewIntervals(0) |
||||
i2.Add(10, 20) |
||||
if err := s.Put(key2, i2); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
err = s.Get(key2, i) |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if i.String() != i2.String() { |
||||
t.Errorf("expected interval %s, got %s", i2, i) |
||||
} |
||||
|
||||
if err := s.Delete(key1); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err := s.Get(key1, i); err != state.ErrNotFound { |
||||
t.Errorf("expected error %v, got %s", state.ErrNotFound, err) |
||||
} |
||||
if err := s.Get(key2, i); err != nil { |
||||
t.Errorf("expected error %v, got %s", nil, err) |
||||
} |
||||
|
||||
if err := s.Delete(key2); err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
if err := s.Get(key2, i); err != state.ErrNotFound { |
||||
t.Errorf("expected error %v, got %s", state.ErrNotFound, err) |
||||
} |
||||
} |
@ -0,0 +1,313 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream |
||||
|
||||
import ( |
||||
"context" |
||||
crand "crypto/rand" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"io" |
||||
"sync" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/node" |
||||
"github.com/ethereum/go-ethereum/p2p/discover" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations" |
||||
"github.com/ethereum/go-ethereum/p2p/simulations/adapters" |
||||
"github.com/ethereum/go-ethereum/rpc" |
||||
"github.com/ethereum/go-ethereum/swarm/network" |
||||
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing" |
||||
"github.com/ethereum/go-ethereum/swarm/state" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
var ( |
||||
externalStreamName = "externalStream" |
||||
externalStreamSessionAt uint64 = 50 |
||||
externalStreamMaxKeys uint64 = 100 |
||||
) |
||||
|
||||
func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { |
||||
id := ctx.Config.ID |
||||
addr := toAddr(id) |
||||
kad := network.NewKademlia(addr.Over(), network.NewKadParams()) |
||||
store := stores[id].(*storage.LocalStore) |
||||
db := storage.NewDBAPI(store) |
||||
delivery := NewDelivery(kad, db) |
||||
deliveries[id] = delivery |
||||
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{ |
||||
SkipCheck: defaultSkipCheck, |
||||
}) |
||||
|
||||
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) { |
||||
return newTestExternalClient(db), nil |
||||
}) |
||||
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) { |
||||
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil |
||||
}) |
||||
|
||||
go func() { |
||||
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id)) |
||||
}() |
||||
return &TestExternalRegistry{r}, nil |
||||
} |
||||
|
||||
func TestIntervals(t *testing.T) { |
||||
testIntervals(t, true, nil, false) |
||||
testIntervals(t, false, NewRange(9, 26), false) |
||||
testIntervals(t, true, NewRange(9, 26), false) |
||||
|
||||
testIntervals(t, true, nil, true) |
||||
testIntervals(t, false, NewRange(9, 26), true) |
||||
testIntervals(t, true, NewRange(9, 26), true) |
||||
} |
||||
|
||||
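// testIntervals runs a two-node simulation in which node 1 subscribes to the
// external stream served by node 0 and verifies that the live and/or history
// hashes for the requested range arrive in the expected order.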
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) { |
||||
nodes := 2 |
||||
chunkCount := dataChunkCount |
||||
|
||||
defer setDefaultSkipCheck(defaultSkipCheck) |
||||
defaultSkipCheck = skipCheck |
||||
|
||||
toAddr = network.NewAddrFromNodeID |
||||
conf := &streamTesting.RunConfig{ |
||||
Adapter: *adapter, |
||||
NodeCount: nodes, |
||||
ConnLevel: 1, |
||||
ToAddr: toAddr, |
||||
Services: services, |
||||
DefaultService: "intervalsStreamer", |
||||
} |
||||
|
||||
sim, teardown, err := streamTesting.NewSimulation(conf) |
||||
var rpcSubscriptionsWg sync.WaitGroup |
||||
defer func() { |
||||
rpcSubscriptionsWg.Wait() |
||||
teardown() |
||||
}() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
stores = make(map[discover.NodeID]storage.ChunkStore) |
||||
deliveries = make(map[discover.NodeID]*Delivery) |
||||
for i, id := range sim.IDs { |
||||
stores[id] = sim.Stores[i] |
||||
} |
||||
|
||||
peerCount = func(id discover.NodeID) int { |
||||
return 1 |
||||
} |
||||
|
||||
fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams()) |
||||
size := chunkCount * chunkSize |
||||
_, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false) |
||||
wait() |
||||
if err != nil { |
||||
t.Fatal(err) |
||||
} |
||||
|
||||
errc := make(chan error, 1) |
||||
waitPeerErrC = make(chan error) |
||||
quitC := make(chan struct{}) |
||||
defer close(quitC) |
||||
|
||||
action := func(ctx context.Context) error { |
||||
i := 0 |
||||
for err := range waitPeerErrC { |
||||
if err != nil { |
||||
return fmt.Errorf("error waiting for peers: %s", err) |
||||
} |
||||
i++ |
||||
if i == nodes { |
||||
break |
||||
} |
||||
} |
||||
|
||||
id := sim.IDs[1] |
||||
|
||||
err := sim.CallClient(id, func(client *rpc.Client) error { |
||||
|
||||
sid := sim.IDs[0] |
||||
|
||||
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
rpcSubscriptionsWg.Add(1) |
||||
go func() { |
||||
<-doneC |
||||
rpcSubscriptionsWg.Done() |
||||
}() |
||||
ctx, cancel := context.WithTimeout(ctx, 100*time.Second) |
||||
defer cancel() |
||||
|
||||
err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(externalStreamName, "", live), history, Top) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
liveErrC := make(chan error) |
||||
historyErrC := make(chan error) |
||||
|
||||
go func() { |
||||
if !live { |
||||
close(liveErrC) |
||||
return |
||||
} |
||||
|
||||
var err error |
||||
defer func() { |
||||
liveErrC <- err |
||||
}() |
||||
|
||||
// live stream
|
||||
liveHashesChan := make(chan []byte) |
||||
liveSubscription, err := client.Subscribe(ctx, "stream", liveHashesChan, "getHashes", sid, NewStream(externalStreamName, "", true)) |
||||
if err != nil { |
||||
return |
||||
} |
||||
defer liveSubscription.Unsubscribe() |
||||
|
||||
i := externalStreamSessionAt |
||||
|
||||
// we have subscribed, enable notifications
|
||||
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", true)) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
for { |
||||
select { |
||||
case hash := <-liveHashesChan: |
||||
h := binary.BigEndian.Uint64(hash) |
||||
if h != i { |
||||
err = fmt.Errorf("expected live hash %d, got %d", i, h) |
||||
return |
||||
} |
||||
i++ |
||||
if i > externalStreamMaxKeys { |
||||
return |
||||
} |
||||
case err = <-liveSubscription.Err(): |
||||
return |
||||
case <-ctx.Done(): |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
|
||||
go func() { |
||||
if live && history == nil { |
||||
close(historyErrC) |
||||
return |
||||
} |
||||
|
||||
var err error |
||||
defer func() { |
||||
historyErrC <- err |
||||
}() |
||||
|
||||
// history stream
|
||||
historyHashesChan := make(chan []byte) |
||||
historySubscription, err := client.Subscribe(ctx, "stream", historyHashesChan, "getHashes", sid, NewStream(externalStreamName, "", false)) |
||||
if err != nil { |
||||
return |
||||
} |
||||
defer historySubscription.Unsubscribe() |
||||
|
||||
var i uint64 |
||||
historyTo := externalStreamMaxKeys |
||||
if history != nil { |
||||
i = history.From |
||||
if history.To != 0 { |
||||
historyTo = history.To |
||||
} |
||||
} |
||||
|
||||
// we have subscribed, enable notifications
|
||||
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", false)) |
||||
if err != nil { |
||||
return |
||||
} |
||||
|
||||
for { |
||||
select { |
||||
case hash := <-historyHashesChan: |
||||
h := binary.BigEndian.Uint64(hash) |
||||
if h != i { |
||||
err = fmt.Errorf("expected history hash %d, got %d", i, h) |
||||
return |
||||
} |
||||
i++ |
||||
if i > historyTo { |
||||
return |
||||
} |
||||
case err = <-historySubscription.Err(): |
||||
return |
||||
case <-ctx.Done(): |
||||
return |
||||
} |
||||
} |
||||
}() |
||||
|
||||
if err := <-liveErrC; err != nil { |
||||
return err |
||||
} |
||||
if err := <-historyErrC; err != nil { |
||||
return err |
||||
} |
||||
|
||||
return nil |
||||
}) |
||||
return err |
||||
} |
||||
check := func(ctx context.Context, id discover.NodeID) (bool, error) { |
||||
select { |
||||
case err := <-errc: |
||||
return false, err |
||||
case <-ctx.Done(): |
||||
return false, ctx.Err() |
||||
default: |
||||
} |
||||
return true, nil |
||||
} |
||||
|
||||
conf.Step = &simulations.Step{ |
||||
Action: action, |
||||
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]), |
||||
Expect: &simulations.Expectation{ |
||||
Nodes: sim.IDs[1:1], |
||||
Check: check, |
||||
}, |
||||
} |
||||
startedAt := time.Now() |
||||
timeout := 300 * time.Second |
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout) |
||||
defer cancel() |
||||
result, err := sim.Run(ctx, conf) |
||||
finishedAt := time.Now() |
||||
if err != nil { |
||||
t.Fatalf("Setting up simulation failed: %v", err) |
||||
} |
||||
if result.Error != nil { |
||||
t.Fatalf("Simulation failed: %s", result.Error) |
||||
} |
||||
streamTesting.CheckResult(t, result, startedAt, finishedAt) |
||||
} |
@ -0,0 +1,370 @@ |
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package stream |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/swarm/log" |
||||
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector" |
||||
"github.com/ethereum/go-ethereum/swarm/storage" |
||||
) |
||||
|
||||
// Stream defines a unique stream identifier.
|
||||
type Stream struct { |
||||
// Name is used for Client and Server functions identification.
|
||||
Name string |
||||
// Key is the name of specific stream data.
|
||||
Key string |
||||
// Live defines whether the stream delivers only new data
|
||||
// for the specific stream.
|
||||
Live bool |
||||
} |
||||
|
||||
func NewStream(name string, key string, live bool) Stream { |
||||
return Stream{ |
||||
Name: name, |
||||
Key: key, |
||||
Live: live, |
||||
} |
||||
} |
||||
|
||||
// String returns a stream id based on all Stream fields.
|
||||
func (s Stream) String() string { |
||||
t := "h" |
||||
if s.Live { |
||||
t = "l" |
||||
} |
||||
return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t) |
||||
} |
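A minimal usage sketch, not part of the diff: since Stream implements fmt.Stringer via the String method above, printing a stream value yields its id directly. The name "SYNC" and key "2" below are example values only.

	// illustrative only, assuming the Stream type and NewStream constructor above
	func ExampleStream_String() {
		fmt.Println(NewStream("SYNC", "2", true))  // SYNC|2|l  (live)
		fmt.Println(NewStream("SYNC", "2", false)) // SYNC|2|h  (history)
	}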
||||
|
||||
// SubscribeMsg is the protocol msg for requesting a stream (section)
|
||||
type SubscribeMsg struct { |
||||
Stream Stream |
||||
History *Range `rlp:"nil"` |
||||
Priority uint8 // delivered on priority channel
|
||||
} |
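For orientation, a hypothetical subscription request for a bounded history section could be built as below; the stream name, range bounds and priority value are invented for the example, and Range is assumed to be the exported From/To pair used throughout this file.

	// illustrative only
	msg := SubscribeMsg{
		Stream:   NewStream("SYNC", "2", false), // history stream, so Live is false
		History:  &Range{From: 0, To: 1000},     // request keys 0..1000
		Priority: 1,                             // delivered on priority channel 1
	}
	_ = msg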
||||
|
||||
// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
|
||||
// specific stream
|
||||
type RequestSubscriptionMsg struct { |
||||
Stream Stream |
||||
History *Range `rlp:"nil"` |
||||
Priority uint8 // delivered on priority channel
|
||||
} |
||||
|
||||
func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) { |
||||
log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream)) |
||||
return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority) |
||||
} |
||||
|
||||
func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) { |
||||
metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1) |
||||
|
||||
defer func() { |
||||
if err != nil { |
||||
if e := p.Send(SubscribeErrorMsg{ |
||||
Error: err.Error(), |
||||
}); e != nil { |
||||
log.Error("send stream subscribe error message", "err", err) |
||||
} |
||||
} |
||||
}() |
||||
|
||||
log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History) |
||||
|
||||
f, err := p.streamer.GetServerFunc(req.Stream.Name) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
s, err := f(p, req.Stream.Key, req.Stream.Live) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
os, err := p.setServer(req.Stream, s, req.Priority) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
var from uint64 |
||||
var to uint64 |
||||
if !req.Stream.Live && req.History != nil { |
||||
from = req.History.From |
||||
to = req.History.To |
||||
} |
||||
|
||||
go func() { |
||||
if err := p.SendOfferedHashes(os, from, to); err != nil { |
||||
log.Warn("SendOfferedHashes dropping peer", "err", err) |
||||
p.Drop(err) |
||||
} |
||||
}() |
||||
|
||||
if req.Stream.Live && req.History != nil { |
||||
// subscribe to the history stream
|
||||
s, err := f(p, req.Stream.Key, false) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
go func() { |
||||
if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil { |
||||
log.Warn("SendOfferedHashes dropping peer", "err", err) |
||||
p.Drop(err) |
||||
} |
||||
}() |
||||
} |
||||
|
||||
return nil |
||||
} |
||||
|
||||
type SubscribeErrorMsg struct { |
||||
Error string |
||||
} |
||||
|
||||
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) { |
||||
return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error) |
||||
} |
||||
|
||||
type UnsubscribeMsg struct { |
||||
Stream Stream |
||||
} |
||||
|
||||
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error { |
||||
return p.removeServer(req.Stream) |
||||
} |
||||
|
||||
type QuitMsg struct { |
||||
Stream Stream |
||||
} |
||||
|
||||
func (p *Peer) handleQuitMsg(req *QuitMsg) error { |
||||
return p.removeClient(req.Stream) |
||||
} |
||||
|
||||
// OfferedHashesMsg is the protocol msg for offering to hand over a
|
||||
// stream section
|
||||
type OfferedHashesMsg struct { |
||||
Stream Stream // name of Stream
|
||||
From, To uint64 // peer and db-specific entry count
|
||||
Hashes []byte // stream of hashes (128)
|
||||
*HandoverProof // HandoverProof
|
||||
} |
||||
|
||||
// String pretty prints OfferedHashesMsg
|
||||
func (m OfferedHashesMsg) String() string { |
||||
return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize) |
||||
} |
||||
|
||||
// handleOfferedHashesMsg is the protocol msg handler; it runs the offered hashes
// through the incoming streamer's filter (the client's NeedData method)
|
||||
func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { |
||||
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1) |
||||
|
||||
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
hashes := req.Hashes |
||||
want, err := bv.New(len(hashes) / HashSize) |
||||
if err != nil { |
||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err) |
||||
} |
||||
wg := sync.WaitGroup{} |
||||
for i := 0; i < len(hashes); i += HashSize { |
||||
hash := hashes[i : i+HashSize] |
||||
|
||||
if wait := c.NeedData(hash); wait != nil { |
||||
want.Set(i/HashSize, true) |
||||
wg.Add(1) |
||||
// create request and wait until the chunk data arrives and is stored
|
||||
go func(w func()) { |
||||
w() |
||||
wg.Done() |
||||
}(wait) |
||||
} |
||||
} |
||||
// done := make(chan bool)
|
||||
// go func() {
|
||||
// wg.Wait()
|
||||
// close(done)
|
||||
// }()
|
||||
// go func() {
|
||||
// select {
|
||||
// case <-done:
|
||||
// s.next <- s.batchDone(p, req, hashes)
|
||||
// case <-time.After(1 * time.Second):
|
||||
// p.Drop(errors.New("timeout waiting for batch to be delivered"))
|
||||
// }
|
||||
// }()
|
||||
go func() { |
||||
wg.Wait() |
||||
select { |
||||
case c.next <- c.batchDone(p, req, hashes): |
||||
case <-c.quit: |
||||
} |
||||
}() |
||||
// only send wantedKeysMsg if all missing chunks of the previous batch arrived
|
||||
// except
|
||||
if c.stream.Live { |
||||
c.sessionAt = req.From |
||||
} |
||||
from, to := c.nextBatch(req.To + 1) |
||||
log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To) |
||||
if from == to { |
||||
return nil |
||||
} |
||||
|
||||
msg := &WantedHashesMsg{ |
||||
Stream: req.Stream, |
||||
Want: want.Bytes(), |
||||
From: from, |
||||
To: to, |
||||
} |
||||
go func() { |
||||
select { |
||||
case <-time.After(120 * time.Second): |
||||
log.Warn("handleOfferedHashesMsg timeout, so dropping peer") |
||||
p.Drop(errors.New("handle offered hashes timeout")) |
||||
return |
||||
case err := <-c.next: |
||||
if err != nil { |
||||
log.Warn("c.next dropping peer", "err", err) |
||||
p.Drop(err) |
||||
return |
||||
} |
||||
case <-c.quit: |
||||
return |
||||
} |
||||
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To) |
||||
err := p.SendPriority(msg, c.priority) |
||||
if err != nil { |
||||
log.Warn("SendPriority err, so dropping peer", "err", err) |
||||
p.Drop(err) |
||||
} |
||||
}() |
||||
return nil |
||||
} |
||||
|
||||
// WantedHashesMsg is the protocol msg data for signaling which of the hashes
// offered in OfferedHashesMsg the downstream peer actually wants sent over
|
||||
type WantedHashesMsg struct { |
||||
Stream Stream |
||||
Want []byte // bitvector indicating which keys of the batch needed
|
||||
From, To uint64 // next interval offset - empty if not to be continued
|
||||
} |
||||
|
||||
// String pretty prints WantedHashesMsg
|
||||
func (m WantedHashesMsg) String() string { |
||||
return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To) |
||||
} |
||||
|
||||
// handleWantedHashesMsg protocol msg handler
|
||||
// * sends the next batch of unsynced keys
|
||||
// * sends the actual data chunks as per WantedHashesMsg
|
||||
func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { |
||||
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1) |
||||
|
||||
log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To) |
||||
s, err := p.getServer(req.Stream) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
hashes := s.currentBatch |
||||
// launch in go routine since GetBatch blocks until new hashes arrive
|
||||
go func() { |
||||
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil { |
||||
log.Warn("SendOfferedHashes dropping peer", "err", err) |
||||
p.Drop(err) |
||||
} |
||||
}() |
||||
// go p.SendOfferedHashes(s, req.From, req.To)
|
||||
l := len(hashes) / HashSize |
||||
|
||||
log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l) |
||||
want, err := bv.NewFromBytes(req.Want, l) |
||||
if err != nil { |
||||
return fmt.Errorf("error initiaising bitvector of length %v: %v", l, err) |
||||
} |
||||
for i := 0; i < l; i++ { |
||||
if want.Get(i) { |
||||
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1) |
||||
|
||||
hash := hashes[i*HashSize : (i+1)*HashSize] |
||||
data, err := s.GetData(hash) |
||||
if err != nil { |
||||
return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err) |
||||
} |
||||
chunk := storage.NewChunk(hash, nil) |
||||
chunk.SData = data |
||||
if length := len(chunk.SData); length < 9 { |
||||
log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr) |
||||
} |
||||
if err := p.Deliver(chunk, s.priority); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
} |
||||
return nil |
||||
} |
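The Want bitvector ties handleOfferedHashesMsg and handleWantedHashesMsg together: the downstream peer sets bit i for the i-th offered hash it still needs, and the upstream peer reads those bits back to decide which chunks to deliver. A self-contained sketch of that round trip, assuming a 32-byte hash size as used by the streamer; it relies only on the bitvector calls already used above (New, Set, Bytes, NewFromBytes, Get).

	package main

	import (
		"fmt"

		bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
	)

	func main() {
		const hashSize = 32                // assumed hash length, matching HashSize above
		hashes := make([]byte, 4*hashSize) // a batch of four offered hashes

		// Downstream side (handleOfferedHashesMsg): mark the hashes still needed.
		want, err := bv.New(len(hashes) / hashSize)
		if err != nil {
			panic(err)
		}
		for i := 0; i < len(hashes); i += hashSize {
			if (i/hashSize)%2 == 0 { // pretend only the even-indexed chunks are missing
				want.Set(i/hashSize, true)
			}
		}

		// Upstream side (handleWantedHashesMsg): decode the bitvector and pick hashes to deliver.
		got, err := bv.NewFromBytes(want.Bytes(), len(hashes)/hashSize)
		if err != nil {
			panic(err)
		}
		for i := 0; i < len(hashes)/hashSize; i++ {
			fmt.Println("offered hash", i, "wanted:", got.Get(i))
		}
	}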
||||
|
||||
// Handover represents a statement that the upstream peer hands over the stream section
|
||||
type Handover struct { |
||||
Stream Stream // name of stream
|
||||
Start, End uint64 // index of hashes
|
||||
Root []byte // Root hash for indexed segment inclusion proofs
|
||||
} |
||||
|
||||
// HandoverProof represents a signed statement that the upstream peer handed over the stream section
|
||||
type HandoverProof struct { |
||||
Sig []byte // Sign(Hash(Serialisation(Handover)))
|
||||
*Handover |
||||
} |
||||
|
||||
// Takeover represents a statement that the downstream peer took over (stored all data)
|
||||
// handed over
|
||||
type Takeover Handover |
||||
|
||||
// TakeoverProof represents a signed statement that the downstream peer took over
|
||||
// the stream section
|
||||
type TakeoverProof struct { |
||||
Sig []byte // Sign(Hash(Serialisation(Takeover)))
|
||||
*Takeover |
||||
} |
||||
|
||||
// TakeoverProofMsg is the protocol msg sent by downstream peer
|
||||
type TakeoverProofMsg TakeoverProof |
||||
|
||||
// String pretty prints TakeoverProofMsg
|
||||
func (m TakeoverProofMsg) String() string { |
||||
return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig) |
||||
} |
||||
|
||||
func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error { |
||||
_, err := p.getServer(req.Stream) |
||||
// store the strongest takeoverproof for the stream in streamer
|
||||
return err |
||||
} |