swarm: network rewrite merge

pull/17041/head
ethersphere 6 years ago
parent 574378edb5
commit e187711c65
Changed files (lines changed in parentheses):

  1. .github/CODEOWNERS (38)
  2. bmt/bmt.go (560)
  3. bmt/bmt_r.go (85)
  4. bmt/bmt_test.go (481)
  5. cmd/p2psim/main.go (5)
  6. cmd/swarm/config.go (109)
  7. cmd/swarm/config_test.go (129)
  8. cmd/swarm/db.go (28)
  9. cmd/swarm/download.go (85)
  10. cmd/swarm/export_test.go (139)
  11. cmd/swarm/fs.go (127)
  12. cmd/swarm/fs_test.go (234)
  13. cmd/swarm/hash.go (6)
  14. cmd/swarm/main.go (321)
  15. cmd/swarm/manifest.go (14)
  16. cmd/swarm/run_test.go (132)
  17. cmd/swarm/swarm-smoke/main.go (101)
  18. cmd/swarm/swarm-smoke/upload_and_sync.go (184)
  19. cmd/swarm/upload.go (9)
  20. cmd/swarm/upload_test.go (243)
  21. p2p/metrics.go (10)
  22. p2p/peer.go (7)
  23. p2p/protocols/protocol.go (7)
  24. p2p/simulations/network.go (67)
  25. p2p/testing/protocolsession.go (2)
  26. swarm/AUTHORS (35)
  27. swarm/OWNERS (26)
  28. swarm/api/api.go (446)
  29. swarm/api/api_test.go (58)
  30. swarm/api/client/client.go (141)
  31. swarm/api/client/client_test.go (51)
  32. swarm/api/config.go (119)
  33. swarm/api/config_test.go (20)
  34. swarm/api/filesystem.go (43)
  35. swarm/api/filesystem_test.go (53)
  36. swarm/api/http/error.go (54)
  37. swarm/api/http/error_templates.go (15)
  38. swarm/api/http/error_test.go (12)
  39. swarm/api/http/roundtripper.go (2)
  40. swarm/api/http/server.go (675)
  41. swarm/api/http/server_test.go (419)
  42. swarm/api/http/templates.go (3)
  43. swarm/api/manifest.go (185)
  44. swarm/api/manifest_test.go (35)
  45. swarm/api/storage.go (32)
  46. swarm/api/storage_test.go (14)
  47. swarm/api/testapi.go (32)
  48. swarm/api/uri.go (46)
  49. swarm/api/uri_test.go (47)
  50. swarm/bmt/bmt.go (543)
  51. swarm/bmt/bmt_r.go (85)
  52. swarm/bmt/bmt_test.go (390)
  53. swarm/fuse/fuse_dir.go (15)
  54. swarm/fuse/fuse_file.go (43)
  55. swarm/fuse/swarmfs.go (4)
  56. swarm/fuse/swarmfs_test.go (1695)
  57. swarm/fuse/swarmfs_unix.go (156)
  58. swarm/fuse/swarmfs_util.go (18)
  59. swarm/grafana_dashboards/ldbstore.json (2278)
  60. swarm/grafana_dashboards/swarm.json (3198)
  61. swarm/log/log.go (48)
  62. swarm/metrics/flags.go (2)
  63. swarm/multihash/multihash.go (92)
  64. swarm/multihash/multihash_test.go (53)
  65. swarm/network/README.md (152)
  66. swarm/network/bitvector/bitvector.go (66)
  67. swarm/network/bitvector/bitvector_test.go (104)
  68. swarm/network/common.go (30)
  69. swarm/network/depo.go (232)
  70. swarm/network/discovery.go (210)
  71. swarm/network/discovery_test.go (57)
  72. swarm/network/forwarding.go (150)
  73. swarm/network/hive.go (514)
  74. swarm/network/hive_test.go (108)
  75. swarm/network/kademlia.go (765)
  76. swarm/network/kademlia/address.go (173)
  77. swarm/network/kademlia/address_test.go (96)
  78. swarm/network/kademlia/kaddb.go (350)
  79. swarm/network/kademlia/kademlia.go (454)
  80. swarm/network/kademlia/kademlia_test.go (392)
  81. swarm/network/kademlia_test.go (623)
  82. swarm/network/messages.go (308)
  83. swarm/network/priorityqueue/priorityqueue.go (111)
  84. swarm/network/priorityqueue/priorityqueue_test.go (97)
  85. swarm/network/protocol.go (759)
  86. swarm/network/protocol_test.go (225)
  87. swarm/network/simulations/discovery/discovery.go (17)
  88. swarm/network/simulations/discovery/discovery_test.go (586)
  89. swarm/network/simulations/discovery/jsonsnapshot.txt (1)
  90. swarm/network/simulations/overlay.go (144)
  91. swarm/network/simulations/overlay_test.go (195)
  92. swarm/network/stream/common_test.go (449)
  93. swarm/network/stream/delivery.go (272)
  94. swarm/network/stream/delivery_test.go (699)
  95. swarm/network/stream/intervals/dbstore_test.go (42)
  96. swarm/network/stream/intervals/intervals.go (206)
  97. swarm/network/stream/intervals/intervals_test.go (395)
  98. swarm/network/stream/intervals/store_test.go (80)
  99. swarm/network/stream/intervals_test.go (313)
  100. swarm/network/stream/messages.go (370)

Some files were not shown because too many files have changed in this diff.

.github/CODEOWNERS
@@ -1,12 +1,32 @@
 # Lines starting with '#' are comments.
 # Each line is a file pattern followed by one or more owners.
 accounts/usbwallet @karalabe
 consensus @karalabe
 core/ @karalabe @holiman
 eth/ @karalabe
 les/ @zsfelfoldi
 light/ @zsfelfoldi
 mobile/ @karalabe
 p2p/ @fjl @zsfelfoldi
+swarm/bmt @zelig
+swarm/dev @lmars
+swarm/fuse @jmozah @holisticode
+swarm/grafana_dashboards @nonsense
+swarm/metrics @nonsense @holisticode
+swarm/multihash @nolash
+swarm/network/bitvector @zelig @janos @gbalint
+swarm/network/priorityqueue @zelig @janos @gbalint
+swarm/network/simulations @zelig
+swarm/network/stream @janos @zelig @gbalint @holisticode @justelad
+swarm/network/stream/intervals @janos
+swarm/network/stream/testing @zelig
+swarm/pot @zelig
+swarm/pss @nolash @zelig @nonsense
+swarm/services @zelig
+swarm/state @justelad
+swarm/storage/encryption @gbalint @zelig @nagydani
+swarm/storage/mock @janos
+swarm/storage/mru @nolash
+swarm/testutil @lmars
 whisper/ @gballet @gluk256

bmt/bmt.go (deleted)
@@ -1,560 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt provides a binary merkle tree implementation
package bmt
import (
"fmt"
"hash"
"io"
"strings"
"sync"
"sync/atomic"
)
/*
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. keccak256 SHA3)
It is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
segment is a substring of a chunk starting at a particular offset
The size of the underlying segments is fixed at 32 bytes (called the resolution
of the BMT hash), the EVM word size to optimize for on-chain BMT verification
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
Two implementations are provided:
* RefHasher is optimized for code simplicity and meant as a reference implementation
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
It implements the ChunkHash interface as well as the go standard hash.Hash interface
*/
const (
// DefaultSegmentCount is the maximum number of segments of the underlying chunk
DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches
// DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e,
// the maximum number of concurrent BMT hashing operations performed by the same hasher
DefaultPoolSize = 8
)
// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
type BaseHasher func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// implements the hash.Hash interface
// reuse pool of Tree-s for amortised memory allocation and resource control
// supports order-agnostic concurrent segment writes
// as well as sequential read and write
// can not be called concurrently on more than one chunk
// can be further appended after Sum
// Reset gives back the Tree to the pool and is guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
type Hasher struct {
pool *TreePool // BMT resource pool
bmt *Tree // prebuilt BMT resource for flowcontrol and proofs
blocksize int // segment size (size of hash) also for hash.Hash
count int // segment count
size int // for hash.Hash same as hashsize
cur int // cursor position for rightmost currently open chunk
segment []byte // the rightmost open segment (not complete)
depth int // index of last level
result chan []byte // result channel
hash []byte // to record the result
max int32 // max segments for SegmentWriter interface
blockLength []byte // The block length that needs to be added in Sum
}
// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new Tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
pool: p,
depth: depth(p.SegmentCount),
size: p.SegmentSize,
blocksize: p.SegmentSize,
count: p.SegmentCount,
result: make(chan []byte),
}
}
// Node is a reusable segment hasher representing a node in a BMT
// it allows for continued writes after a Sum
// and is left in completely reusable state after Reset
type Node struct {
level, index int // position of node for information/logging only
initial bool // first and last node
root bool // whether the node is root to a smaller BMT
isLeft bool // whether it is left side of the parent double segment
unbalanced bool // indicates if a node has only the left segment
parent *Node // BMT connections
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte
}
// NewNode constructor for segment hasher nodes in the BMT
func NewNode(level, index int, parent *Node) *Node {
return &Node{
parent: parent,
level: level,
index: index,
initial: index == 0,
isLeft: index%2 == 0,
}
}
// TreePool provides a pool of Trees used as resources by Hasher
// a Tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
// Hasher Reset releases the Tree to the pool
type TreePool struct {
lock sync.Mutex
c chan *Tree
hasher BaseHasher
SegmentSize int
SegmentCount int
Capacity int
count int
}
// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity
// on GetTree it reuses free Trees or creates a new one if size is not reached
func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool {
return &TreePool{
c: make(chan *Tree, capacity),
hasher: hasher,
SegmentSize: hasher().Size(),
SegmentCount: segmentCount,
Capacity: capacity,
}
}
// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
p.lock.Lock()
defer p.lock.Unlock()
for len(p.c) > n {
<-p.c
p.count--
}
}
// Reserve is blocking until it returns an available Tree
// it reuses free Trees or creates a new one if size is not reached
func (p *TreePool) Reserve() *Tree {
p.lock.Lock()
defer p.lock.Unlock()
var t *Tree
if p.count == p.Capacity {
return <-p.c
}
select {
case t = <-p.c:
default:
t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount)
p.count++
}
return t
}
// Release gives back a Tree to the pool.
// This Tree is guaranteed to be in reusable state
// does not need locking
func (p *TreePool) Release(t *Tree) {
p.c <- t // can never fail but...
}
// Tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to pick one for each chunk hash
// the Tree is 'locked' while not in the pool
type Tree struct {
leaves []*Node
}
// Draw draws the BMT (badly)
func (t *Tree) Draw(hash []byte, d int) string {
var left, right []string
var anc []*Node
for i, n := range t.leaves {
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
if i%2 == 0 {
anc = append(anc, n.parent)
}
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
}
anc = t.leaves
var hashes [][]string
for l := 0; len(anc) > 0; l++ {
var nodes []*Node
hash := []string{""}
for i, n := range anc {
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
if i%2 == 0 && n.parent != nil {
nodes = append(nodes, n.parent)
}
}
hash = append(hash, "")
hashes = append(hashes, hash)
anc = nodes
}
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
total := 60
del := " "
var rows []string
for i := len(hashes) - 1; i >= 0; i-- {
var textlen int
hash := hashes[i]
for _, s := range hash {
textlen += len(s)
}
if total < textlen {
total = textlen + len(hash)
}
delsize := (total - textlen) / (len(hash) - 1)
if delsize > len(del) {
delsize = len(del)
}
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
rows = append(rows, row)
}
rows = append(rows, strings.Join(left, " "))
rows = append(rows, strings.Join(right, " "))
return strings.Join(rows, "\n") + "\n"
}
// NewTree initialises the Tree by building up the nodes of a BMT
// segment size is stipulated to be the size of the hash
// segmentCount needs to be a positive integer and does not need to be
// a power of two and can even be an odd number
// segmentSize * segmentCount determines the maximum chunk size
// hashed using the tree
func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree {
n := NewNode(0, 0, nil)
n.root = true
prevlevel := []*Node{n}
// iterate over levels and create 2^level nodes
level := 1
count := 2
for d := 1; d <= depth(segmentCount); d++ {
nodes := make([]*Node, count)
for i := 0; i < len(nodes); i++ {
parent := prevlevel[i/2]
t := NewNode(level, i, parent)
nodes[i] = t
}
prevlevel = nodes
level++
count *= 2
}
// the datanode level is the nodes on the last level
return &Tree{
leaves: prevlevel,
}
}
// methods needed by hash.Hash
// Size returns the size
func (h *Hasher) Size() int {
return h.size
}
// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
return h.blocksize
}
// Sum returns the hash of the buffer
// hash.Hash interface Sum method appends the byte slice to the underlying
// data before it calculates and returns the hash of the chunk
func (h *Hasher) Sum(b []byte) (r []byte) {
t := h.bmt
i := h.cur
n := t.leaves[i]
j := i
// must run strictly before all nodes calculate
// datanodes are guaranteed to have a parent
if len(h.segment) > h.size && i > 0 && n.parent != nil {
n = n.parent
} else {
i *= 2
}
d := h.finalise(n, i)
h.writeSegment(j, h.segment, d)
c := <-h.result
h.releaseTree()
// sha3(length + BMT(pure_chunk))
if h.blockLength == nil {
return c
}
res := h.pool.hasher()
res.Reset()
res.Write(h.blockLength)
res.Write(c)
return res.Sum(nil)
}
// Hasher implements the SwarmHash interface
// Hash waits for the hasher result and returns it
// caller must call this on a BMT Hasher being written to
func (h *Hasher) Hash() []byte {
return <-h.result
}
// Hasher implements the io.Writer interface
// Write fills the buffer to hash
// with every full segment completed it launches a hasher go routine
// that shoots up the BMT
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l <= 0 {
return 0, nil
}
s := h.segment
i := h.cur
count := (h.count + 1) / 2
need := h.count*h.size - h.cur*2*h.size
size := h.size
if need > size {
size *= 2
}
if l < need {
need = l
}
// calculate missing bit to complete current open segment
rest := size - len(s)
if need < rest {
rest = need
}
s = append(s, b[:rest]...)
need -= rest
// read full segments and the last possibly partial segment
for need > 0 && i < count-1 {
// push all finished chunks we read
h.writeSegment(i, s, h.depth)
need -= size
if need < 0 {
size += need
}
s = b[rest : rest+size]
rest += size
i++
}
h.segment = s
h.cur = i
// otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full
return l, nil
}
// Hasher implements the io.ReaderFrom interface
// ReadFrom reads from io.Reader and appends to the data to hash using Write
// it reads so that the chunk to hash is of maximum length or the reader reaches EOF
// caller must Reset the hasher prior to call
func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) {
bufsize := h.size*h.count - h.size*h.cur - len(h.segment)
buf := make([]byte, bufsize)
var read int
for {
var n int
n, err = r.Read(buf)
read += n
if err == io.EOF || read == len(buf) {
hash := h.Sum(buf[:n])
if read == len(buf) {
err = NewEOC(hash)
}
break
}
if err != nil {
break
}
n, err = h.Write(buf[:n])
if err != nil {
break
}
}
return int64(read), err
}
// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
h.getTree()
h.blockLength = nil
}
// Hasher implements the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash
func (h *Hasher) ResetWithLength(l []byte) {
h.Reset()
h.blockLength = l
}
// releaseTree gives back the Tree to the pool whereby it unlocks
// it resets tree, segment and index
func (h *Hasher) releaseTree() {
if h.bmt != nil {
n := h.bmt.leaves[h.cur]
for ; n != nil; n = n.parent {
n.unbalanced = false
if n.parent != nil {
n.root = false
}
}
h.pool.Release(h.bmt)
h.bmt = nil
}
h.cur = 0
h.segment = nil
}
func (h *Hasher) writeSegment(i int, s []byte, d int) {
hash := h.pool.hasher()
n := h.bmt.leaves[i]
if len(s) > h.size && n.parent != nil {
go func() {
hash.Reset()
hash.Write(s)
s = hash.Sum(nil)
if n.root {
h.result <- s
return
}
h.run(n.parent, hash, d, n.index, s)
}()
return
}
go h.run(n, hash, d, i*2, s)
}
func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) {
isLeft := i%2 == 0
for {
if isLeft {
n.left = s
} else {
n.right = s
}
if !n.unbalanced && n.toggle() {
return
}
if !n.unbalanced || !isLeft || i == 0 && d == 0 {
hash.Reset()
hash.Write(n.left)
hash.Write(n.right)
s = hash.Sum(nil)
} else {
s = append(n.left, n.right...)
}
h.hash = s
if n.root {
h.result <- s
return
}
isLeft = n.isLeft
n = n.parent
i++
}
}
// getTree obtains a BMT resource by reserving one from the pool
func (h *Hasher) getTree() *Tree {
if h.bmt != nil {
return h.bmt
}
t := h.pool.Reserve()
h.bmt = t
return t
}
// atomic bool toggle implementing a concurrent reusable 2-state object
// atomic AddInt32 with %2 implements atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *Node) toggle() bool {
return atomic.AddInt32(&n.state, 1)%2 == 1
}
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
end = 4
}
return fmt.Sprintf("%x", b[:end])
}
func depth(n int) (d int) {
for l := (n - 1) / 2; l > 0; l /= 2 {
d++
}
return d
}
// finalise follows the zigzags on the tree belonging
// to the final data segment
func (h *Hasher) finalise(n *Node, i int) (d int) {
isLeft := i%2 == 0
for {
// when the final segment's path is going via left segments
// the incoming data is pushed to the parent upon pulling the left
// we do not need to toggle the state since this condition is
// detectable
n.unbalanced = isLeft
n.right = nil
if n.initial {
n.root = true
return d
}
isLeft = n.isLeft
n = n.parent
d++
}
}
// EOC (end of chunk) implements the error interface
type EOC struct {
Hash []byte // read the hash of the chunk off the error
}
// Error returns the error string
func (e *EOC) Error() string {
return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash)
}
// NewEOC creates new end of chunk error with the hash
func NewEOC(hash []byte) *EOC {
return &EOC{hash}
}

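For orientation while reading the deleted implementation above: the Hasher is driven through the standard hash.Hash interface, reserving a Tree from the pool on Reset, streaming the chunk in with Write, and releasing the Tree when Sum computes the root. A minimal usage sketch in Go, mirroring how the deleted bmt_test.go below exercises it; the old import path github.com/ethereum/go-ethereum/bmt is assumed (this file is removed by the commit and lives on as swarm/bmt):

    package main

    import (
        "fmt"

        "github.com/ethereum/go-ethereum/bmt" // pre-commit path, assumed for the sketch
        "github.com/ethereum/go-ethereum/crypto/sha3"
    )

    func main() {
        // One pool of BMT trees over 128 segments of 32 bytes (a 4096-byte chunk),
        // shared by up to DefaultPoolSize concurrent hashing operations.
        pool := bmt.NewTreePool(sha3.NewKeccak256, bmt.DefaultSegmentCount, bmt.DefaultPoolSize)
        defer pool.Drain(0)

        h := bmt.New(pool) // implements hash.Hash
        data := make([]byte, 4096)

        h.Reset() // must precede writing; reserves a Tree from the pool
        h.Write(data)
        fmt.Printf("%x\n", h.Sum(nil)) // computes the root and releases the Tree
    }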
bmt/bmt_r.go (deleted)
@@ -1,85 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt is a simple nonconcurrent reference implementation for hashsize segment based
// Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// see testBMTHasherCorrectness function in bmt_test.go
package bmt
import (
"hash"
)
// RefHasher is the non-optimized easy to read reference implementation of BMT
type RefHasher struct {
span int
section int
cap int
h hash.Hash
}
// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasher, count int) *RefHasher {
h := hasher()
hashsize := h.Size()
maxsize := hashsize * count
c := 2
for ; c < count; c *= 2 {
}
if c > 2 {
c /= 2
}
return &RefHasher{
section: 2 * hashsize,
span: c * hashsize,
cap: maxsize,
h: h,
}
}
// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(d []byte) []byte {
if len(d) > rh.cap {
d = d[:rh.cap]
}
return rh.hash(d, rh.span)
}
func (rh *RefHasher) hash(d []byte, s int) []byte {
l := len(d)
left := d
var right []byte
if l > rh.section {
for ; s >= l; s /= 2 {
}
left = rh.hash(d[:s], s)
right = d[s:]
if l-s > rh.section/2 {
right = rh.hash(right, s)
}
}
defer rh.h.Reset()
rh.h.Write(left)
rh.h.Write(right)
h := rh.h.Sum(nil)
return h
}

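The reference hasher's recursion is easiest to check on a concrete length. Per the test table in the deleted bmt_test.go below, a 96-byte chunk hashes as sha3(sha3(data[:64]), data[64:]): the left 64-byte span is reduced first and the 32-byte remainder is folded in raw. A small sketch under the same assumed pre-commit import path:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ethereum/go-ethereum/bmt" // pre-commit path, assumed for the sketch
        "github.com/ethereum/go-ethereum/crypto/sha3"
    )

    func main() {
        keccak := func(parts ...[]byte) []byte {
            h := sha3.NewKeccak256()
            for _, p := range parts {
                h.Write(p)
            }
            return h.Sum(nil)
        }

        data := make([]byte, 96) // any 96-byte input; zeros keep the sketch short

        // expected shape for lengths in [65,96]: sha3(sha3(data[:64]), data[64:])
        want := keccak(keccak(data[:64]), data[64:])
        got := bmt.NewRefHasher(sha3.NewKeccak256, 128).Hash(data)
        fmt.Println(bytes.Equal(got, want)) // true
    }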
bmt/bmt_test.go (deleted)
@@ -1,481 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bmt
import (
"bytes"
crand "crypto/rand"
"fmt"
"hash"
"io"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/sha3"
)
const (
maxproccnt = 8
)
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
hashFunc := sha3.NewKeccak256
sha3 := func(data ...[]byte) []byte {
h := hashFunc()
for _, v := range data {
h.Write(v)
}
return h.Sum(nil)
}
// the test struct is used to specify the expected BMT hash for data
// lengths between "from" and "to"
type test struct {
from int64
to int64
expected func([]byte) []byte
}
var tests []*test
// all lengths in [0,64] should be:
//
// sha3(data)
//
tests = append(tests, &test{
from: 0,
to: 64,
expected: func(data []byte) []byte {
return sha3(data)
},
})
// all lengths in [65,96] should be:
//
// sha3(
// sha3(data[:64])
// data[64:]
// )
//
tests = append(tests, &test{
from: 65,
to: 96,
expected: func(data []byte) []byte {
return sha3(sha3(data[:64]), data[64:])
},
})
// all lengths in [97,128] should be:
//
// sha3(
// sha3(data[:64])
// sha3(data[64:])
// )
//
tests = append(tests, &test{
from: 97,
to: 128,
expected: func(data []byte) []byte {
return sha3(sha3(data[:64]), sha3(data[64:]))
},
})
// all lengths in [129,160] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// data[128:]
// )
//
tests = append(tests, &test{
from: 129,
to: 160,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:])
},
})
// all lengths in [161,192] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(data[128:])
// )
//
tests = append(tests, &test{
from: 161,
to: 192,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:]))
},
})
// all lengths in [193,224] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(
// sha3(data[128:192])
// data[192:]
// )
// )
//
tests = append(tests, &test{
from: 193,
to: 224,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:]))
},
})
// all lengths in [225,256] should be:
//
// sha3(
// sha3(
// sha3(data[:64])
// sha3(data[64:128])
// )
// sha3(
// sha3(data[128:192])
// sha3(data[192:])
// )
// )
//
tests = append(tests, &test{
from: 225,
to: 256,
expected: func(data []byte) []byte {
return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:])))
},
})
// run the tests
for _, x := range tests {
for length := x.from; length <= x.to; length++ {
t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) {
data := make([]byte, length)
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
t.Fatal(err)
}
expected := x.expected(data)
actual := NewRefHasher(hashFunc, 128).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
})
}
}
}
func testDataReader(l int) (r io.Reader) {
return io.LimitReader(crand.Reader, int64(l))
}
func TestHasherCorrectness(t *testing.T) {
err := testHasher(testBaseHasher)
if err != nil {
t.Fatal(err)
}
}
func testHasher(f func(BaseHasher, []byte, int, int) error) error {
tdata := testDataReader(4128)
data := make([]byte, 4128)
tdata.Read(data)
hasher := sha3.NewKeccak256
size := hasher().Size()
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
var err error
for _, count := range counts {
max := count * size
incr := 1
for n := 0; n <= max+incr; n += incr {
err = f(hasher, data, n, count)
if err != nil {
return err
}
}
}
return nil
}
func TestHasherReuseWithoutRelease(t *testing.T) {
testHasherReuse(1, t)
}
func TestHasherReuseWithRelease(t *testing.T) {
testHasherReuse(maxproccnt, t)
}
func testHasherReuse(i int, t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, 128, i)
defer pool.Drain(0)
bmt := New(pool)
for i := 0; i < 500; i++ {
n := rand.Intn(4096)
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
err := testHasherCorrectness(bmt, hasher, data, n, 128)
if err != nil {
t.Fatal(err)
}
}
}
func TestHasherConcurrency(t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, 128, maxproccnt)
defer pool.Drain(0)
wg := sync.WaitGroup{}
cycles := 100
wg.Add(maxproccnt * cycles)
errc := make(chan error)
for p := 0; p < maxproccnt; p++ {
for i := 0; i < cycles; i++ {
go func() {
bmt := New(pool)
n := rand.Intn(4096)
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
err := testHasherCorrectness(bmt, hasher, data, n, 128)
wg.Done()
if err != nil {
errc <- err
}
}()
}
}
go func() {
wg.Wait()
close(errc)
}()
var err error
select {
case <-time.NewTimer(5 * time.Second).C:
err = fmt.Errorf("timed out")
case err = <-errc:
}
if err != nil {
t.Fatal(err)
}
}
func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error {
pool := NewTreePool(hasher, count, 1)
defer pool.Drain(0)
bmt := New(pool)
return testHasherCorrectness(bmt, hasher, d, n, count)
}
func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) {
data := d[:n]
rbmt := NewRefHasher(hasher, count)
exp := rbmt.Hash(data)
timeout := time.NewTimer(time.Second)
c := make(chan error)
go func() {
bmt.Reset()
bmt.Write(data)
got := bmt.Sum(nil)
if !bytes.Equal(got, exp) {
c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
}
close(c)
}()
select {
case <-timeout.C:
err = fmt.Errorf("BMT hash calculation timed out")
case err = <-c:
}
return err
}
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) }
func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) }
func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) }
func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) }
func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) }
func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) }
func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) }
func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) }
func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) }
func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) }
func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) }
func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) }
func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) }
func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) }
func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) }
func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) }
func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) }
func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) }
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes
// doing it on maxproccnt parallel routines, each reusing the base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
tdata := testDataReader(64)
data := make([]byte, 64)
tdata.Read(data)
hasher := sha3.NewKeccak256
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
count := int32((n-1)/hasher().Size() + 1)
wg := sync.WaitGroup{}
wg.Add(maxproccnt)
var i int32
for j := 0; j < maxproccnt; j++ {
go func() {
defer wg.Done()
h := hasher()
for atomic.AddInt32(&i, 1) < count {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}()
}
wg.Wait()
}
}
func benchmarkHasher(n int, t *testing.B) {
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
size := 1
hasher := sha3.NewKeccak256
segmentCount := 128
pool := NewTreePool(hasher, segmentCount, size)
bmt := New(pool)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
bmt.Reset()
bmt.Write(data)
bmt.Sum(nil)
}
}
func benchmarkHasherReuse(poolsize, n int, t *testing.B) {
tdata := testDataReader(n)
data := make([]byte, n)
tdata.Read(data)
hasher := sha3.NewKeccak256
segmentCount := 128
pool := NewTreePool(hasher, segmentCount, poolsize)
cycles := 200
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
wg := sync.WaitGroup{}
wg.Add(cycles)
for j := 0; j < cycles; j++ {
bmt := New(pool)
go func() {
defer wg.Done()
bmt.Reset()
bmt.Write(data)
bmt.Sum(nil)
}()
}
wg.Wait()
}
}
func benchmarkSHA3(n int, t *testing.B) {
data := make([]byte, n)
tdata := testDataReader(n)
tdata.Read(data)
hasher := sha3.NewKeccak256
h := hasher()
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}
func benchmarkRefHasher(n int, t *testing.B) {
data := make([]byte, n)
tdata := testDataReader(n)
tdata.Read(data)
hasher := sha3.NewKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
rbmt.Hash(data)
}
}

cmd/p2psim/main.go
@@ -275,9 +275,8 @@ func createNode(ctx *cli.Context) error {
 	if len(ctx.Args()) != 0 {
 		return cli.ShowCommandHelp(ctx, ctx.Command.Name)
 	}
-	config := &adapters.NodeConfig{
-		Name: ctx.String("name"),
-	}
+	config := adapters.RandomNodeConfig()
+	config.Name = ctx.String("name")
 	if key := ctx.String("key"); key != "" {
 		privKey, err := crypto.HexToECDSA(key)
 		if err != nil {

cmd/swarm/config.go
@@ -24,6 +24,7 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"time"
 	"unicode"

 	cli "gopkg.in/urfave/cli.v1"
@@ -37,6 +38,8 @@ import (
 	bzzapi "github.com/ethereum/go-ethereum/swarm/api"
 )

+const SWARM_VERSION = "0.3"
+
 var (
 	//flag definition for the dumpconfig command
 	DumpConfigCommand = cli.Command{
@@ -58,19 +61,25 @@ var (
 //constants for environment variables
 const (
 	SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR"
 	SWARM_ENV_ACCOUNT         = "SWARM_ACCOUNT"
 	SWARM_ENV_LISTEN_ADDR     = "SWARM_LISTEN_ADDR"
 	SWARM_ENV_PORT            = "SWARM_PORT"
 	SWARM_ENV_NETWORK_ID      = "SWARM_NETWORK_ID"
 	SWARM_ENV_SWAP_ENABLE     = "SWARM_SWAP_ENABLE"
 	SWARM_ENV_SWAP_API        = "SWARM_SWAP_API"
-	SWARM_ENV_SYNC_ENABLE     = "SWARM_SYNC_ENABLE"
-	SWARM_ENV_ENS_API         = "SWARM_ENS_API"
-	SWARM_ENV_ENS_ADDR        = "SWARM_ENS_ADDR"
-	SWARM_ENV_CORS            = "SWARM_CORS"
-	SWARM_ENV_BOOTNODES       = "SWARM_BOOTNODES"
-	GETH_ENV_DATADIR          = "GETH_DATADIR"
+	SWARM_ENV_SYNC_DISABLE         = "SWARM_SYNC_DISABLE"
+	SWARM_ENV_SYNC_UPDATE_DELAY    = "SWARM_ENV_SYNC_UPDATE_DELAY"
+	SWARM_ENV_DELIVERY_SKIP_CHECK  = "SWARM_DELIVERY_SKIP_CHECK"
+	SWARM_ENV_ENS_API              = "SWARM_ENS_API"
+	SWARM_ENV_ENS_ADDR             = "SWARM_ENS_ADDR"
+	SWARM_ENV_CORS                 = "SWARM_CORS"
+	SWARM_ENV_BOOTNODES            = "SWARM_BOOTNODES"
+	SWARM_ENV_PSS_ENABLE           = "SWARM_PSS_ENABLE"
+	SWARM_ENV_STORE_PATH           = "SWARM_STORE_PATH"
+	SWARM_ENV_STORE_CAPACITY       = "SWARM_STORE_CAPACITY"
+	SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY"
+	GETH_ENV_DATADIR               = "GETH_DATADIR"
 )

 // These settings ensure that TOML keys use the same names as Go struct fields.
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{
 //before booting the swarm node, build the configuration
 func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
-	//check for deprecated flags
-	checkDeprecated(ctx)
 	//start by creating a default config
-	config = bzzapi.NewDefaultConfig()
+	config = bzzapi.NewConfig()
 	//first load settings from config file (if provided)
 	config, err = configFileOverride(config, ctx)
 	if err != nil {
@@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 	if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" {
 		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
 		}
 	}
@@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 		currentConfig.SwapEnabled = true
 	}

-	if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) {
-		currentConfig.SyncEnabled = true
+	if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) {
+		currentConfig.SyncEnabled = false
 	}

+	if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 {
+		currentConfig.SyncUpdateDelay = d
+	}
+
+	if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) {
+		currentConfig.DeliverySkipCheck = true
+	}
+
-	currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name)
-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name)
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
 		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
 	}
@@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 		currentConfig.EnsAPIs = ensAPIs
 	}

-	if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
-		currentConfig.EnsRoot = common.HexToAddress(ensaddr)
-	}
-
 	if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" {
 		currentConfig.Cors = cors
 	}
@@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Config {
 		currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name)
 	}

+	if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" {
+		currentConfig.LocalStoreParams.ChunkDbPath = storePath
+	}
+
+	if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 {
+		currentConfig.LocalStoreParams.DbCapacity = storeCapacity
+	}
+
+	if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 {
+		currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity
+	}
+
 	return currentConfig
 }
@@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 	if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" {
 		if id, _ := strconv.Atoi(networkid); id != 0 {
-			currentConfig.NetworkId = uint64(id)
+			currentConfig.NetworkID = uint64(id)
 		}
 	}
@@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
 		}
 	}

-	if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" {
-		if sync, err := strconv.ParseBool(syncenable); err != nil {
-			currentConfig.SyncEnabled = sync
+	if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" {
+		if sync, err := strconv.ParseBool(syncdisable); err != nil {
+			currentConfig.SyncEnabled = !sync
+		}
+	}
+
+	if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" {
+		if skipCheck, err := strconv.ParseBool(v); err != nil {
+			currentConfig.DeliverySkipCheck = skipCheck
+		}
+	}
+
+	if v := os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" {
+		if d, err := time.ParseDuration(v); err != nil {
+			currentConfig.SyncUpdateDelay = d
 		}
 	}

 	if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" {
-		currentConfig.SwapApi = swapapi
+		currentConfig.SwapAPI = swapapi
 	}

-	if currentConfig.SwapEnabled && currentConfig.SwapApi == "" {
+	if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" {
 		utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
 	}
@@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error {
 	return nil
 }

-//deprecated flags checked here
-func checkDeprecated(ctx *cli.Context) {
-	// exit if the deprecated --ethapi flag is set
-	if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
-		utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
-	}
-	// warn if --ens-api flag is set
-	if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
-		log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
-	}
-}
-
 //validate configuration parameters
 func validateConfig(cfg *bzzapi.Config) (err error) {
 	for _, ensAPI := range cfg.EnsAPIs {

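The hunks above replace the old SWARM_SYNC_ENABLE switch with SWARM_SYNC_DISABLE and add delivery-skip-check and sync-update-delay overrides. The intended pattern is to apply an environment variable only when it is set and parses cleanly. A hedged sketch of that pattern in Go (hypothetical helper name, not the commit's code; note it assigns on err == nil, the successful-parse branch):

    package main

    import (
        "os"
        "strconv"
        "time"

        bzzapi "github.com/ethereum/go-ethereum/swarm/api"
    )

    // envOverrides is a hypothetical helper mirroring envVarsOverride above;
    // the Config field names follow the diff.
    func envOverrides(cfg *bzzapi.Config) *bzzapi.Config {
        if v := os.Getenv("SWARM_SYNC_DISABLE"); v != "" {
            if disable, err := strconv.ParseBool(v); err == nil {
                cfg.SyncEnabled = !disable // the variable disables syncing, so invert
            }
        }
        if v := os.Getenv("SWARM_DELIVERY_SKIP_CHECK"); v != "" {
            if skip, err := strconv.ParseBool(v); err == nil {
                cfg.DeliverySkipCheck = skip
            }
        }
        if v := os.Getenv("SWARM_ENV_SYNC_UPDATE_DELAY"); v != "" {
            if d, err := time.ParseDuration(v); err == nil {
                cfg.SyncUpdateDelay = d
            }
        }
        return cfg
    }

    func main() {
        _ = envOverrides(bzzapi.NewConfig())
    }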
cmd/swarm/config_test.go
@@ -34,7 +34,7 @@ import (
 func TestDumpConfig(t *testing.T) {
 	swarm := runSwarm(t, "dumpconfig")
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatal(err)
@@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
+func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestFailsNoBzzAccount(t *testing.T) {
+func TestConfigFailsNoBzzAccount(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545",
@@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) {
 	swarm.ExpectExit()
 }

-func TestCmdLineOverrides(t *testing.T) {
+func TestConfigCmdLineOverrides(t *testing.T) {
 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
 		t.Fatal(err)
@@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", CorsStringFlag.Name), "*",
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
+		fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name),
 		fmt.Sprintf("--%s", EnsAPIFlag.Name), "",
 		"--datadir", dir,
 		"--ipcpath", conf.IPCPath,
@@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 42 {
-		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId)
+	if info.NetworkID != 42 {
+		t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
+	}
+
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
 	}

 	if info.Cors != "*" {
@@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) {
 	node.Shutdown()
 }

-func TestFileOverrides(t *testing.T) {
+func TestConfigFileOverrides(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) {
 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = true
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = false
+	defaultConf.DeliverySkipCheck = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = httpPort
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML string
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
@@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 54 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.NetworkID != 54 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
+	if info.DbCapacity != 9000000 {
+		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID)
 	}

-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }

-func TestEnvVars(t *testing.T) {
+func TestConfigEnvVars(t *testing.T) {
 	// assign ports
 	httpPort, err := assignTCPPort()
 	if err != nil {
@@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) {
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999"))
 	envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*"))
-	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true"))
+	envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true"))

 	dir, err := ioutil.TempDir("", "bzztest")
 	if err != nil {
@@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != 999 {
-		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId)
+	if info.NetworkID != 999 {
+		t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID)
 	}

 	if info.Cors != "*" {
 		t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
+	}
+
+	if !info.DeliverySkipCheck {
+		t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not")
 	}

 	node.Shutdown()
 	cmd.Process.Kill()
 }

-func TestCmdLineOverridesFile(t *testing.T) {
+func TestConfigCmdLineOverridesFile(t *testing.T) {

 	// assign ports
 	httpPort, err := assignTCPPort()
@@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) {
 	//create a config file
 	//first, create a default conf
-	defaultConf := api.NewDefaultConfig()
+	defaultConf := api.NewConfig()
 	//change some values in order to test if they have been loaded
-	defaultConf.SyncEnabled = false
-	defaultConf.NetworkId = 54
+	defaultConf.SyncEnabled = true
+	defaultConf.NetworkID = 54
 	defaultConf.Port = "8588"
-	defaultConf.StoreParams.DbCapacity = 9000000
-	defaultConf.ChunkerParams.Branches = 64
-	defaultConf.HiveParams.CallInterval = 6000000000
+	defaultConf.DbCapacity = 9000000
+	defaultConf.HiveParams.KeepAliveInterval = 6000000000
 	defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second
-	defaultConf.SyncParams.KeyBufferSize = 512
+	//defaultConf.SyncParams.KeyBufferSize = 512
 	//create a TOML file
 	out, err := tomlSettings.Marshal(&defaultConf)
 	if err != nil {
 		t.Fatalf("Error creating TOML file in TestFileOverride: %v", err)
 	}
 	//write file
-	f, err := ioutil.TempFile("", "testconfig.toml")
+	fname := "testconfig.toml"
+	f, err := ioutil.TempFile("", fname)
 	if err != nil {
 		t.Fatalf("Error writing TOML file in TestFileOverride: %v", err)
 	}
+	defer os.Remove(fname)
 	//write file
 	_, err = f.WriteString(string(out))
 	if err != nil {
@@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) {
 	flags := []string{
 		fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77",
 		fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort,
-		fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name),
+		fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name),
 		fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(),
 		fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(),
 		"--ens-api", "",
@@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) {
 		t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port)
 	}

-	if info.NetworkId != expectNetworkId {
-		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId)
+	if info.NetworkID != expectNetworkId {
+		t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID)
 	}

-	if !info.SyncEnabled {
-		t.Fatal("Expected Sync to be enabled, but is false")
+	if info.SyncEnabled {
+		t.Fatal("Expected Sync to be disabled, but is true")
 	}

-	if info.StoreParams.DbCapacity != 9000000 {
-		t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId)
+	if info.LocalStoreParams.DbCapacity != 9000000 {
+		t.Fatalf("Expected Capacity to be %d, got %d", 9000000, info.LocalStoreParams.DbCapacity)
 	}

-	if info.ChunkerParams.Branches != 64 {
-		t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches)
-	}
-
-	if info.HiveParams.CallInterval != 6000000000 {
-		t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval))
+	if info.HiveParams.KeepAliveInterval != 6000000000 {
+		t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval))
 	}

 	if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second {
 		t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval)
 	}

-	if info.SyncParams.KeyBufferSize != 512 {
-		t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
-	}
+	// if info.SyncParams.KeyBufferSize != 512 {
+	// 	t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize)
+	// }

 	node.Shutdown()
 }

cmd/swarm/db.go
@@ -23,6 +23,7 @@ import (
 	"path/filepath"

 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 	"gopkg.in/urfave/cli.v1"
@@ -30,11 +31,11 @@ import (
 func dbExport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to write the tar archive to, - for stdout) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) {
 func dbImport(ctx *cli.Context) {
 	args := ctx.Args()
-	if len(args) != 2 {
-		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+	if len(args) != 3 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database), <file> (path to read the tar archive from, - for stdin) and the base key")
 	}

-	store, err := openDbStore(args[0])
+	store, err := openLDBStore(args[0], common.Hex2Bytes(args[2]))
 	if err != nil {
 		utils.Fatalf("error opening local chunk database: %s", err)
 	}
@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) {
func dbClean(ctx *cli.Context) { func dbClean(ctx *cli.Context) {
args := ctx.Args() args := ctx.Args()
if len(args) != 1 { if len(args) != 2 {
utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)") utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database) and the base key")
} }
store, err := openDbStore(args[0]) store, err := openLDBStore(args[0], common.Hex2Bytes(args[1]))
if err != nil { if err != nil {
utils.Fatalf("error opening local chunk database: %s", err) utils.Fatalf("error opening local chunk database: %s", err)
} }
@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) {
store.Cleanup() store.Cleanup()
} }
func openDbStore(path string) (*storage.DbStore, error) { func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) {
if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
return nil, fmt.Errorf("invalid chunkdb path: %s", err) return nil, fmt.Errorf("invalid chunkdb path: %s", err)
} }
hash := storage.MakeHashFunc("SHA3")
return storage.NewDbStore(path, hash, 10000000, 0) storeparams := storage.NewDefaultStoreParams()
ldbparams := storage.NewLDBStoreParams(storeparams, path)
ldbparams.BaseKey = basekey
return storage.NewLDBStore(ldbparams)
} }
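For reference, a hypothetical invocation of the reworked subcommands; the paths are placeholders, and the base key is the node's BzzKey as reported by bzz_info, hex-encoded without the 0x prefix:

    swarm db export <datadir>/swarm/bzz-<key>/chunks export.tar <basekey>
    swarm db import <datadir>/swarm/bzz-<key>/chunks export.tar <basekey>
    swarm db clean <datadir>/swarm/bzz-<key>/chunks <basekey>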

@ -0,0 +1,85 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
"gopkg.in/urfave/cli.v1"
)
func download(ctx *cli.Context) {
log.Debug("downloading content using swarm down")
args := ctx.Args()
dest := "."
switch len(args) {
case 0:
utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]")
case 1:
log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir"))
default:
log.Trace(fmt.Sprintf("destination path arg: %s", args[1]))
if absDest, err := filepath.Abs(args[1]); err == nil {
dest = absDest
} else {
utils.Fatalf("could not get download path: %v", err)
}
}
var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
isRecursive = ctx.Bool(SwarmRecursiveFlag.Name)
client = swarm.NewClient(bzzapi)
)
if fi, err := os.Stat(dest); err == nil {
if isRecursive && !fi.Mode().IsDir() {
utils.Fatalf("destination path is not a directory!")
}
} else {
if !os.IsNotExist(err) {
utils.Fatalf("could not stat path: %v", err)
}
}
uri, err := api.Parse(args[0])
if err != nil {
utils.Fatalf("could not parse uri argument: %v", err)
}
// assume behaviour according to --recursive switch
if isRecursive {
if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil {
utils.Fatalf("encoutered an error while downloading directory: %v", err)
}
} else {
// we are downloading a file
log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path))
err := client.DownloadFile(uri.Addr, uri.Path, dest)
if err != nil {
utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err)
}
}
}
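A hedged usage sketch for the new down command; the gateway URL and hash are placeholders:

    # fetch a single file from a manifest into the working directory
    swarm --bzzapi http://127.0.0.1:8500 down bzz:/<hash>/path/to/file
    # fetch a whole manifest recursively into ./out
    swarm --bzzapi http://127.0.0.1:8500 down --recursive bzz:/<hash>/ ./out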

@ -0,0 +1,139 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"crypto/md5"
"crypto/rand"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"github.com/ethereum/go-ethereum/swarm"
)
// TestCLISwarmExportImport performs the following test:
// 1. runs swarm node
// 2. uploads a random file
// 3. runs an export of the local datastore
// 4. runs a second swarm node
// 5. imports the exported datastore
// 6. fetches the uploaded random file from the second node
func TestCLISwarmExportImport(t *testing.T) {
cluster := newTestCluster(t, 1)
// generate random 10mb file
f, cleanup := generateRandomFile(t, 10000000)
defer cleanup()
// upload the file with 'swarm up' and expect a hash
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name())
_, matches := up.ExpectRegexp(`[a-f\d]{64}`)
up.ExpectExit()
hash := matches[0]
var info swarm.Info
if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
t.Fatal(err)
}
cluster.Stop()
defer cluster.Cleanup()
// generate an export.tar
exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x"))
exportCmd.ExpectExit()
// start second cluster
cluster2 := newTestCluster(t, 1)
var info2 swarm.Info
if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil {
t.Fatal(err)
}
// stop second cluster, so that we close LevelDB
cluster2.Stop()
defer cluster2.Cleanup()
// import the export.tar
importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x"))
importCmd.ExpectExit()
// spin second cluster back up
cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x"))
// try to fetch imported file
res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("expected HTTP status %d, got %s", 200, res.Status)
}
// compare downloaded file with the generated random file
mustEqualFiles(t, f, res.Body)
}
func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) {
h := md5.New()
upLen, err := io.Copy(h, up)
if err != nil {
t.Fatal(err)
}
upHash := h.Sum(nil)
h.Reset()
downLen, err := io.Copy(h, down)
if err != nil {
t.Fatal(err)
}
downHash := h.Sum(nil)
if !bytes.Equal(upHash, downHash) || upLen != downLen {
t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one mp5=%x (length %v)", downHash, downLen, upHash, upLen)
}
}
func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
t.Fatal(err)
}
// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}
// write `size` bytes of random data to the file
buf := make([]byte, size)
_, err = rand.Read(buf)
if err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
t.Fatal(err)
}
return tmp, teardown
}
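The manual equivalent of the round trip this test automates, assuming both nodes are stopped; note that export takes the source node's base key while import takes the destination node's:

    swarm db export <path1>/chunks export.tar <bzzkey1>
    swarm db import <path2>/chunks export.tar <bzzkey2>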

@ -0,0 +1,127 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/fuse"
"gopkg.in/urfave/cli.v1"
)
func mount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 2 {
utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifestHash> <file name>")
}
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := &fuse.MountInfo{}
mountPoint, err := filepath.Abs(filepath.Clean(args[1]))
if err != nil {
utils.Fatalf("error expanding path for mount point: %v", err)
}
err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint)
if err != nil {
utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err)
}
}
func unmount(cliContext *cli.Context) {
args := cliContext.Args()
if len(args) < 1 {
utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount path>")
}
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0])
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
}
fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference
}
func listMounts(cliContext *cli.Context) {
client, err := dialRPC(cliContext)
if err != nil {
utils.Fatalf("had an error dailing to RPC endpoint: %v", err)
}
defer client.Close()
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
mf := []fuse.MountInfo{}
err = client.CallContext(ctx, &mf, "swarmfs_listmounts")
if err != nil {
utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err)
}
if len(mf) == 0 {
fmt.Print("Could not found any swarmfs mounts. Please make sure you've specified the correct RPC endpoint\n")
} else {
fmt.Printf("Found %d swarmfs mount(s):\n", len(mf))
for i, mountInfo := range mf {
fmt.Printf("%d:\n", i)
fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint)
fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest)
fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest)
}
}
}
func dialRPC(ctx *cli.Context) (*rpc.Client, error) {
var endpoint string
if ctx.IsSet(utils.IPCPathFlag.Name) {
endpoint = ctx.String(utils.IPCPathFlag.Name)
} else {
utils.Fatalf("swarm ipc endpoint not specified")
}
if endpoint == "" {
endpoint = node.DefaultIPCEndpoint(clientIdentifier)
} else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") {
// Backwards compatibility with geth < 1.5 which required
// these prefixes.
endpoint = endpoint[4:]
}
return rpc.Dial(endpoint)
}
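A minimal standalone sketch of the same RPC round trip the list subcommand performs; the IPC path here is an assumption and would normally come from --ipcpath:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/ethereum/go-ethereum/rpc"
    	"github.com/ethereum/go-ethereum/swarm/fuse"
    )

    func main() {
    	client, err := rpc.Dial("/tmp/bzzd.ipc") // assumed IPC path
    	if err != nil {
    		panic(err)
    	}
    	defer client.Close()
    	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    	defer cancel()
    	mounts := []fuse.MountInfo{}
    	if err := client.CallContext(ctx, &mounts, "swarmfs_listmounts"); err != nil {
    		panic(err)
    	}
    	for _, m := range mounts {
    		fmt.Println(m.MountPoint, m.LatestManifest)
    	}
    }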

@ -0,0 +1,234 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
colorable "github.com/mattn/go-colorable"
)
func init() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
type testFile struct {
filePath string
content string
}
// TestCLISwarmFs is a high-level test of swarmfs
func TestCLISwarmFs(t *testing.T) {
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
// create a tmp dir
mountPoint, err := ioutil.TempDir("", "swarm-test")
log.Debug("swarmfs cli test", "1st mount", mountPoint)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(mountPoint)
handlingNode := cluster.Nodes[0]
mhash := doUploadEmptyDir(t, handlingNode)
log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
mount := runSwarm(t, []string{
"fs",
"mount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mhash,
mountPoint,
}...)
mount.ExpectExit()
filesToAssert := []*testFile{}
dirPath, err := createDirInDir(mountPoint, "testSubDir")
if err != nil {
t.Fatal(err)
}
dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir")
if err != nil {
t.Fatal(err)
}
dummyContent := "somerandomtestcontentthatshouldbeasserted"
dirs := []string{
mountPoint,
dirPath,
dirPath2,
}
files := []string{"f1.tmp", "f2.tmp"}
for _, d := range dirs {
for _, entry := range files {
tFile, err := createTestFileInPath(d, entry, dummyContent)
if err != nil {
t.Fatal(err)
}
filesToAssert = append(filesToAssert, tFile)
}
}
if len(filesToAssert) != len(dirs)*len(files) {
t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert))
}
hashRegexp := `[a-f\d]{64}`
log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmount := runSwarm(t, []string{
"fs",
"unmount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
mountPoint,
}...)
_, matches := unmount.ExpectRegexp(hashRegexp)
unmount.ExpectExit()
hash := matches[0]
if hash == mhash {
t.Fatal("this should not be equal")
}
log.Debug("swarmfs cli test: asserting no files in mount point")
//check that there's nothing in the mount folder
filesInDir, err := ioutil.ReadDir(mountPoint)
if err != nil {
t.Fatalf("had an error reading the directory: %v", err)
}
if len(filesInDir) != 0 {
t.Fatal("there shouldn't be anything here")
}
secondMountPoint, err := ioutil.TempDir("", "swarm-test")
log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(secondMountPoint)
log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
//remount, check files
newMount := runSwarm(t, []string{
"fs",
"mount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
hash, // the latest hash
secondMountPoint,
}...)
newMount.ExpectExit()
time.Sleep(1 * time.Second)
filesInDir, err = ioutil.ReadDir(secondMountPoint)
if err != nil {
t.Fatal(err)
}
if len(filesInDir) == 0 {
t.Fatal("there should be something here")
}
log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount")
for _, file := range filesToAssert {
file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1)
fileBytes, err := ioutil.ReadFile(file.filePath)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) {
t.Fatal("this should be equal")
}
}
log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath))
unmountSec := runSwarm(t, []string{
"fs",
"unmount",
"--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath),
secondMountPoint,
}...)
_, matches = unmountSec.ExpectRegexp(hashRegexp)
unmountSec.ExpectExit()
if matches[0] != hash {
t.Fatal("these should be equal - no changes made")
}
}
func doUploadEmptyDir(t *testing.T, node *testNode) string {
// create a tmp dir
tmpDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", node.URL,
"--recursive",
"up",
tmpDir}
log.Info("swarmfs cli test: uploading dir with 'swarm up'")
up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("swarmfs cli test: dir uploaded", "hash", hash)
return hash
}
func createDirInDir(createInDir string, dirToCreate string) (string, error) {
fullpath := filepath.Join(createInDir, dirToCreate)
err := os.MkdirAll(fullpath, 0777)
if err != nil {
return "", err
}
return fullpath, nil
}
func createTestFileInPath(dir, filename, content string) (*testFile, error) {
filePath := filepath.Join(dir, filename)
file, err := os.Create(filePath)
if err != nil {
return nil, err
}
defer file.Close()
if _, err := io.WriteString(file, content); err != nil {
return nil, err
}
return &testFile{filePath: filePath, content: content}, nil
}

@ -38,11 +38,11 @@ func hash(ctx *cli.Context) {
defer f.Close() defer f.Close()
stat, _ := f.Stat() stat, _ := f.Stat()
chunker := storage.NewTreeChunker(storage.NewChunkerParams()) fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams())
key, err := chunker.Split(f, stat.Size(), nil, nil, nil) addr, _, err := fileStore.Store(f, stat.Size(), false)
if err != nil { if err != nil {
utils.Fatalf("%v\n", err) utils.Fatalf("%v\n", err)
} else { } else {
fmt.Printf("%v\n", key) fmt.Printf("%v\n", addr)
} }
} }

@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
@ -49,6 +48,22 @@ import (
) )
const clientIdentifier = "swarm" const clientIdentifier = "swarm"
const helpTemplate = `NAME:
{{.HelpName}} - {{.Usage}}
USAGE:
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
CATEGORY:
{{.Category}}{{end}}{{if .Description}}
DESCRIPTION:
{{.Description}}{{end}}{{if .VisibleFlags}}
OPTIONS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
`
var ( var (
gitCommit string // Git SHA1 commit hash of the release (set via linker flags) gitCommit string // Git SHA1 commit hash of the release (set via linker flags)
@ -87,10 +102,6 @@ var (
Usage: "Network identifier (integer, default 3=swarm testnet)", Usage: "Network identifier (integer, default 3=swarm testnet)",
EnvVar: SWARM_ENV_NETWORK_ID, EnvVar: SWARM_ENV_NETWORK_ID,
} }
SwarmConfigPathFlag = cli.StringFlag{
Name: "bzzconfig",
Usage: "DEPRECATED: please use --config path/to/TOML-file",
}
SwarmSwapEnabledFlag = cli.BoolFlag{ SwarmSwapEnabledFlag = cli.BoolFlag{
Name: "swap", Name: "swap",
Usage: "Swarm SWAP enabled (default false)", Usage: "Swarm SWAP enabled (default false)",
@ -101,10 +112,20 @@ var (
Usage: "URL of the Ethereum API provider to use to settle SWAP payments", Usage: "URL of the Ethereum API provider to use to settle SWAP payments",
EnvVar: SWARM_ENV_SWAP_API, EnvVar: SWARM_ENV_SWAP_API,
} }
SwarmSyncEnabledFlag = cli.BoolTFlag{ SwarmSyncDisabledFlag = cli.BoolTFlag{
Name: "sync", Name: "nosync",
Usage: "Swarm Syncing enabled (default true)", Usage: "Disable swarm syncing",
EnvVar: SWARM_ENV_SYNC_ENABLE, EnvVar: SWARM_ENV_SYNC_DISABLE,
}
SwarmSyncUpdateDelay = cli.DurationFlag{
Name: "sync-update-delay",
Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)",
EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY,
}
SwarmDeliverySkipCheckFlag = cli.BoolFlag{
Name: "delivery-skip-check",
Usage: "Skip chunk delivery check (default false)",
EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK,
} }
EnsAPIFlag = cli.StringSliceFlag{ EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api", Name: "ens-api",
@ -116,7 +137,7 @@ var (
Usage: "Swarm HTTP endpoint", Usage: "Swarm HTTP endpoint",
Value: "http://127.0.0.1:8500", Value: "http://127.0.0.1:8500",
} }
SwarmRecursiveUploadFlag = cli.BoolFlag{ SwarmRecursiveFlag = cli.BoolFlag{
Name: "recursive", Name: "recursive",
Usage: "Upload directories recursively", Usage: "Upload directories recursively",
} }
@ -136,20 +157,29 @@ var (
Name: "mime", Name: "mime",
Usage: "force mime type", Usage: "force mime type",
} }
SwarmEncryptedFlag = cli.BoolFlag{
Name: "encrypt",
Usage: "use encrypted upload",
}
CorsStringFlag = cli.StringFlag{ CorsStringFlag = cli.StringFlag{
Name: "corsdomain", Name: "corsdomain",
Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')",
EnvVar: SWARM_ENV_CORS, EnvVar: SWARM_ENV_CORS,
} }
SwarmStorePath = cli.StringFlag{
// the following flags are deprecated and should be removed in the future Name: "store.path",
DeprecatedEthAPIFlag = cli.StringFlag{ Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)",
Name: "ethapi", EnvVar: SWARM_ENV_STORE_PATH,
Usage: "DEPRECATED: please use --ens-api and --swap-api",
} }
DeprecatedEnsAddrFlag = cli.StringFlag{ SwarmStoreCapacity = cli.Uint64Flag{
Name: "ens-addr", Name: "store.size",
Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format", Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)",
EnvVar: SWARM_ENV_STORE_CAPACITY,
}
SwarmStoreCacheCapacity = cli.UintFlag{
Name: "store.cache.size",
Usage: "Number of recent chunks cached in memory (default 5000)",
EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY,
} }
) )
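Taken together, a hypothetical daemon invocation exercising the new sync and storage flags could look like this; all values are illustrative, not recommendations:

    swarm --nosync \
          --sync-update-delay 15s \
          --store.path /data/swarm/chunks \
          --store.size 5000000 \
          --store.cache.size 5000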
@ -180,91 +210,130 @@ func init() {
app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Copyright = "Copyright 2013-2016 The go-ethereum Authors"
app.Commands = []cli.Command{ app.Commands = []cli.Command{
{ {
Action: version, Action: version,
Name: "version", CustomHelpTemplate: helpTemplate,
Usage: "Print version numbers", Name: "version",
ArgsUsage: " ", Usage: "Print version numbers",
Description: ` Description: "The output of this command is supposed to be machine-readable",
The output of this command is supposed to be machine-readable.
`,
}, },
{ {
Action: upload, Action: upload,
Name: "up", CustomHelpTemplate: helpTemplate,
Usage: "upload a file or directory to swarm using the HTTP API", Name: "up",
ArgsUsage: " <file>", Usage: "uploads a file or directory to swarm using the HTTP API",
Description: ` ArgsUsage: "<file>",
"upload a file or directory to swarm using the HTTP API and prints the root hash", Flags: []cli.Flag{SwarmEncryptedFlag},
`, Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash",
}, },
{ {
Action: list, Action: list,
Name: "ls", CustomHelpTemplate: helpTemplate,
Usage: "list files and directories contained in a manifest", Name: "ls",
ArgsUsage: " <manifest> [<prefix>]", Usage: "list files and directories contained in a manifest",
Description: ` ArgsUsage: "<manifest> [<prefix>]",
Lists files and directories contained in a manifest. Description: "Lists files and directories contained in a manifest",
`,
}, },
{ {
Action: hash, Action: hash,
Name: "hash", CustomHelpTemplate: helpTemplate,
Usage: "print the swarm hash of a file or directory", Name: "hash",
ArgsUsage: " <file>", Usage: "print the swarm hash of a file or directory",
Description: ` ArgsUsage: "<file>",
Prints the swarm hash of file or directory. Description: "Prints the swarm hash of file or directory",
`,
}, },
{ {
Name: "manifest", Action: download,
Usage: "update a MANIFEST", Name: "down",
ArgsUsage: "manifest COMMAND", Flags: []cli.Flag{SwarmRecursiveFlag},
Usage: "downloads a swarm manifest or a file inside a manifest",
ArgsUsage: " <uri> [<dir>]",
Description: ` Description: `
Updates a MANIFEST by adding/removing/updating the hash of a path. Downloads a swarm bzz URI to the given dir. When no dir is provided, the working directory is assumed. The --recursive flag is expected when downloading a manifest with multiple entries.
`, `,
},
{
Name: "manifest",
CustomHelpTemplate: helpTemplate,
Usage: "perform operations on swarm manifests",
ArgsUsage: "COMMAND",
Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove",
Subcommands: []cli.Command{ Subcommands: []cli.Command{
{ {
Action: add, Action: add,
Name: "add", CustomHelpTemplate: helpTemplate,
Usage: "add a new path to the manifest", Name: "add",
ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]", Usage: "add a new path to the manifest",
Description: ` ArgsUsage: "<MANIFEST> <path> <hash> [<content-type>]",
Adds a new path to the manifest Description: "Adds a new path to the manifest",
`,
}, },
{ {
Action: update, Action: update,
Name: "update", CustomHelpTemplate: helpTemplate,
Usage: "update the hash for an already existing path in the manifest", Name: "update",
ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]", Usage: "update the hash for an already existing path in the manifest",
Description: ` ArgsUsage: "<MANIFEST> <path> <newhash> [<newcontent-type>]",
Update the hash for an already existing path in the manifest Description: "Update the hash for an already existing path in the manifest",
`,
}, },
{ {
Action: remove, Action: remove,
Name: "remove", CustomHelpTemplate: helpTemplate,
Usage: "removes a path from the manifest", Name: "remove",
ArgsUsage: "<MANIFEST> <path>", Usage: "removes a path from the manifest",
Description: ` ArgsUsage: "<MANIFEST> <path>",
Removes a path from the manifest Description: "Removes a path from the manifest",
`,
}, },
}, },
}, },
{ {
Name: "db", Name: "fs",
Usage: "manage the local chunk database", CustomHelpTemplate: helpTemplate,
ArgsUsage: "db COMMAND", Usage: "perform FUSE operations",
Description: ` ArgsUsage: "fs COMMAND",
Manage the local chunk database. Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operations you must reference the correct path to bzzd.ipc in order to communicate with the node",
`,
Subcommands: []cli.Command{ Subcommands: []cli.Command{
{ {
Action: dbExport, Action: mount,
Name: "export", CustomHelpTemplate: helpTemplate,
Usage: "export a local chunk database as a tar archive (use - to send to stdout)", Name: "mount",
ArgsUsage: "<chunkdb> <file>", Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "mount a swarm hash to a mount point",
ArgsUsage: "swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>",
Description: "Mounts a Swarm manifest hash to a given mount point. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: unmount,
CustomHelpTemplate: helpTemplate,
Name: "unmount",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "unmount a swarmfs mount",
ArgsUsage: "swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>",
Description: "Unmounts a swarmfs mount residing at <mount point>. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
{
Action: listMounts,
CustomHelpTemplate: helpTemplate,
Name: "list",
Flags: []cli.Flag{utils.IPCPathFlag},
Usage: "list swarmfs mounts",
ArgsUsage: "swarm fs list --ipcpath <path to bzzd.ipc>",
Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file",
},
},
},
{
Name: "db",
CustomHelpTemplate: helpTemplate,
Usage: "manage the local chunk database",
ArgsUsage: "db COMMAND",
Description: "Manage the local chunk database",
Subcommands: []cli.Command{
{
Action: dbExport,
CustomHelpTemplate: helpTemplate,
Name: "export",
Usage: "export a local chunk database as a tar archive (use - to send to stdout)",
ArgsUsage: "<chunkdb> <file>",
Description: ` Description: `
Export a local chunk database as a tar archive (use - to send to stdout). Export a local chunk database as a tar archive (use - to send to stdout).
@ -277,10 +346,11 @@ pv(1) tool to get a progress bar:
`, `,
}, },
{ {
Action: dbImport, Action: dbImport,
Name: "import", CustomHelpTemplate: helpTemplate,
Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", Name: "import",
ArgsUsage: "<chunkdb> <file>", Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
ArgsUsage: "<chunkdb> <file>",
Description: ` Description: `
Import chunks from a tar archive into a local chunk database (use - to read from stdin). Import chunks from a tar archive into a local chunk database (use - to read from stdin).
@ -293,27 +363,16 @@ pv(1) tool to get a progress bar:
`, `,
}, },
{ {
Action: dbClean, Action: dbClean,
Name: "clean", CustomHelpTemplate: helpTemplate,
Usage: "remove corrupt entries from a local chunk database", Name: "clean",
ArgsUsage: "<chunkdb>", Usage: "remove corrupt entries from a local chunk database",
Description: ` ArgsUsage: "<chunkdb>",
Remove corrupt entries from a local chunk database. Description: "Remove corrupt entries from a local chunk database",
`,
}, },
}, },
}, },
{
Action: func(ctx *cli.Context) {
utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
},
Name: "cleandb",
Usage: "DEPRECATED: use 'swarm db clean'",
ArgsUsage: " ",
Description: `
DEPRECATED: use 'swarm db clean'.
`,
},
// See config.go // See config.go
DumpConfigCommand, DumpConfigCommand,
} }
@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'.
CorsStringFlag, CorsStringFlag,
EnsAPIFlag, EnsAPIFlag,
SwarmTomlConfigPathFlag, SwarmTomlConfigPathFlag,
SwarmConfigPathFlag,
SwarmSwapEnabledFlag, SwarmSwapEnabledFlag,
SwarmSwapAPIFlag, SwarmSwapAPIFlag,
SwarmSyncEnabledFlag, SwarmSyncDisabledFlag,
SwarmSyncUpdateDelay,
SwarmDeliverySkipCheckFlag,
SwarmListenAddrFlag, SwarmListenAddrFlag,
SwarmPortFlag, SwarmPortFlag,
SwarmAccountFlag, SwarmAccountFlag,
@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'.
ChequebookAddrFlag, ChequebookAddrFlag,
// upload flags // upload flags
SwarmApiFlag, SwarmApiFlag,
SwarmRecursiveUploadFlag, SwarmRecursiveFlag,
SwarmWantManifestFlag, SwarmWantManifestFlag,
SwarmUploadDefaultPath, SwarmUploadDefaultPath,
SwarmUpFromStdinFlag, SwarmUpFromStdinFlag,
SwarmUploadMimeType, SwarmUploadMimeType,
//deprecated flags // storage flags
DeprecatedEthAPIFlag, SwarmStorePath,
DeprecatedEnsAddrFlag, SwarmStoreCapacity,
} SwarmStoreCacheCapacity,
}
rpcFlags := []cli.Flag{
utils.WSEnabledFlag,
utils.WSListenAddrFlag,
utils.WSPortFlag,
utils.WSApiFlag,
utils.WSAllowedOriginsFlag,
}
app.Flags = append(app.Flags, rpcFlags...)
app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, swarmmetrics.Flags...) app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Before = func(ctx *cli.Context) error { app.Before = func(ctx *cli.Context) error {
@ -383,16 +452,12 @@ func main() {
} }
func version(ctx *cli.Context) error { func version(ctx *cli.Context) error {
fmt.Println(strings.Title(clientIdentifier)) fmt.Println("Version:", SWARM_VERSION)
fmt.Println("Version:", params.Version)
if gitCommit != "" { if gitCommit != "" {
fmt.Println("Git Commit:", gitCommit) fmt.Println("Git Commit:", gitCommit)
} }
fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
fmt.Println("Go Version:", runtime.Version()) fmt.Println("Go Version:", runtime.Version())
fmt.Println("OS:", runtime.GOOS) fmt.Println("OS:", runtime.GOOS)
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
return nil return nil
} }
@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error {
} }
cfg := defaultNodeConfig cfg := defaultNodeConfig
//pss operates on ws
cfg.WSModules = append(cfg.WSModules, "pss")
//geth only supports --datadir via command line //geth only supports --datadir via command line
//in order to be consistent within swarm, if we pass --datadir via environment variable //in order to be consistent within swarm, if we pass --datadir via environment variable
//or via config file, we get the same directory for geth and swarm //or via config file, we get the same directory for geth and swarm
@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error {
//due to overriding behavior //due to overriding behavior
initSwarmNode(bzzconfig, stack, ctx) initSwarmNode(bzzconfig, stack, ctx)
//register BZZ as node.Service in the ethereum node //register BZZ as node.Service in the ethereum node
registerBzzService(bzzconfig, ctx, stack) registerBzzService(bzzconfig, stack)
//start the node //start the node
utils.StartNode(stack) utils.StartNode(stack)
@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error {
bootnodes := strings.Split(bzzconfig.BootNodes, ",") bootnodes := strings.Split(bzzconfig.BootNodes, ",")
injectBootnodes(stack.Server(), bootnodes) injectBootnodes(stack.Server(), bootnodes)
} else { } else {
if bzzconfig.NetworkId == 3 { if bzzconfig.NetworkID == 3 {
injectBootnodes(stack.Server(), testbetBootNodes) injectBootnodes(stack.Server(), testbetBootNodes)
} }
} }
@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error {
return nil return nil
} }
func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) { func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) {
//define the swarm service boot function //define the swarm service boot function
boot := func(ctx *node.ServiceContext) (node.Service, error) { boot := func(_ *node.ServiceContext) (node.Service, error) {
var swapClient *ethclient.Client // In production, mockStore must be always nil.
var err error return swarm.NewSwarm(bzzconfig, nil)
if bzzconfig.SwapApi != "" {
log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi)
swapClient, err = ethclient.Dial(bzzconfig.SwapApi)
if err != nil {
return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err)
}
}
return swarm.NewSwarm(ctx, swapClient, bzzconfig)
} }
//register within the ethereum node //register within the ethereum node
if err := stack.Register(boot); err != nil { if err := stack.Register(boot); err != nil {

@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
longestPathEntry = api.ManifestEntry{} longestPathEntry = api.ManifestEntry{}
) )
mroot, err := client.DownloadManifest(mhash) mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil { if err != nil {
utils.Fatalf("Manifest download failed: %v", err) utils.Fatalf("Manifest download failed: %v", err)
} }
//TODO: check if the "hash" to add is valid and present in swarm //TODO: check if the "hash" to add is valid and present in swarm
_, err = client.DownloadManifest(hash) _, _, err = client.DownloadManifest(hash)
if err != nil { if err != nil {
utils.Fatalf("Hash to add is not present: %v", err) utils.Fatalf("Hash to add is not present: %v", err)
} }
@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin
mroot.Entries = append(mroot.Entries, newEntry) mroot.Entries = append(mroot.Entries, newEntry)
} }
newManifestHash, err := client.UploadManifest(mroot) newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil { if err != nil {
utils.Fatalf("Manifest upload failed: %v", err) utils.Fatalf("Manifest upload failed: %v", err)
} }
@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
longestPathEntry = api.ManifestEntry{} longestPathEntry = api.ManifestEntry{}
) )
mroot, err := client.DownloadManifest(mhash) mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil { if err != nil {
utils.Fatalf("Manifest download failed: %v", err) utils.Fatalf("Manifest download failed: %v", err)
} }
@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st
mroot = newMRoot mroot = newMRoot
} }
newManifestHash, err := client.UploadManifest(mroot) newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil { if err != nil {
utils.Fatalf("Manifest upload failed: %v", err) utils.Fatalf("Manifest upload failed: %v", err)
} }
@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
longestPathEntry = api.ManifestEntry{} longestPathEntry = api.ManifestEntry{}
) )
mroot, err := client.DownloadManifest(mhash) mroot, isEncrypted, err := client.DownloadManifest(mhash)
if err != nil { if err != nil {
utils.Fatalf("Manifest download failed: %v", err) utils.Fatalf("Manifest download failed: %v", err)
} }
@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string {
mroot = newMRoot mroot = newMRoot
} }
newManifestHash, err := client.UploadManifest(mroot) newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
if err != nil { if err != nil {
utils.Fatalf("Manifest upload failed: %v", err) utils.Fatalf("Manifest upload failed: %v", err)
} }
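All three manifest operations now share the same encryption-aware round trip; a condensed sketch, where mhash stands for an existing manifest hash:

    client := swarm.NewClient("http://127.0.0.1:8500")
    mroot, isEncrypted, err := client.DownloadManifest(mhash)
    if err != nil {
    	utils.Fatalf("Manifest download failed: %v", err)
    }
    // add, update or remove entries in mroot.Entries here
    newManifestHash, err := client.UploadManifest(mroot, isEncrypted)
    if err != nil {
    	utils.Fatalf("Manifest upload failed: %v", err)
    }

Threading isEncrypted from download back into upload keeps an encrypted manifest encrypted after editing.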

@ -81,6 +81,7 @@ type testCluster struct {
// //
// When starting more than one node, they are connected together using the // When starting more than one node, they are connected together using the
// admin SetPeer RPC method. // admin SetPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster { func newTestCluster(t *testing.T, size int) *testCluster {
cluster := &testCluster{} cluster := &testCluster{}
defer func() { defer func() {
@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster {
cluster.TmpDir = tmpdir cluster.TmpDir = tmpdir
// start the nodes // start the nodes
cluster.Nodes = make([]*testNode, 0, size) cluster.StartNewNodes(t, size)
for i := 0; i < size; i++ {
dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i))
if err := os.Mkdir(dir, 0700); err != nil {
t.Fatal(err)
}
node := newTestNode(t, dir)
node.Name = fmt.Sprintf("swarm%02d", i)
cluster.Nodes = append(cluster.Nodes, node)
}
if size == 1 { if size == 1 {
return cluster return cluster
@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() {
os.RemoveAll(c.TmpDir) os.RemoveAll(c.TmpDir)
} }
func (c *testCluster) Stop() {
for _, node := range c.Nodes {
node.Shutdown()
}
}
func (c *testCluster) StartNewNodes(t *testing.T, size int) {
c.Nodes = make([]*testNode, 0, size)
for i := 0; i < size; i++ {
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
if err := os.Mkdir(dir, 0700); err != nil {
t.Fatal(err)
}
node := newTestNode(t, dir)
node.Name = fmt.Sprintf("swarm%02d", i)
c.Nodes = append(c.Nodes, node)
}
}
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
c.Nodes = make([]*testNode, 0, size)
for i := 0; i < size; i++ {
dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
node := existingTestNode(t, dir, bzzaccount)
node.Name = fmt.Sprintf("swarm%02d", i)
c.Nodes = append(c.Nodes, node)
}
}
func (c *testCluster) Cleanup() {
os.RemoveAll(c.TmpDir)
}
type testNode struct { type testNode struct {
Name string Name string
Addr string Addr string
URL string URL string
Enode string Enode string
Dir string Dir string
Client *rpc.Client IpcPath string
Cmd *cmdtest.TestCmd Client *rpc.Client
Cmd *cmdtest.TestCmd
} }
const testPassphrase = "swarm-test-passphrase" const testPassphrase = "swarm-test-passphrase"
@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun
return conf, account return conf, account
} }
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
conf, _ := getTestAccount(t, dir)
node := &testNode{Dir: dir}
// use a unique IPCPath when running tests on Windows
if runtime.GOOS == "windows" {
conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
}
// assign ports
httpPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}
p2pPort, err := assignTCPPort()
if err != nil {
t.Fatal(err)
}
// start the node
node.Cmd = runSwarm(t,
"--port", p2pPort,
"--nodiscover",
"--datadir", dir,
"--ipcpath", conf.IPCPath,
"--ens-api", "",
"--bzzaccount", bzzaccount,
"--bzznetworkid", "321",
"--bzzport", httpPort,
"--verbosity", "6",
)
node.Cmd.InputLine(testPassphrase)
defer func() {
if t.Failed() {
node.Shutdown()
}
}()
// wait for the node to start
for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
node.Client, err = rpc.Dial(conf.IPCEndpoint())
if err == nil {
break
}
}
if node.Client == nil {
t.Fatal(err)
}
// load info
var info swarm.Info
if err := node.Client.Call(&info, "bzz_info"); err != nil {
t.Fatal(err)
}
node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
node.URL = "http://" + node.Addr
var nodeInfo p2p.NodeInfo
if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
t.Fatal(err)
}
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
return node
}
func newTestNode(t *testing.T, dir string) *testNode { func newTestNode(t *testing.T, dir string) *testNode {
conf, account := getTestAccount(t, dir) conf, account := getTestAccount(t, dir)
@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode {
t.Fatal(err) t.Fatal(err)
} }
node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
node.IpcPath = conf.IPCPath
return node return node
} }
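The new helpers make the stop-and-restart cycle used by the export/import test explicit; a sketch assuming a one-node cluster:

    cluster := newTestCluster(t, 1)
    var info swarm.Info
    if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil {
    	t.Fatal(err)
    }
    cluster.Stop() // shuts the nodes down but keeps TmpDir around
    // operate on the on-disk chunk store while the node is down
    cluster.StartExistingNodes(t, 1, strings.TrimPrefix(info.BzzAccount, "0x"))
    defer cluster.Cleanup() // removes TmpDir when done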

@ -0,0 +1,101 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"os"
"sort"
"github.com/ethereum/go-ethereum/log"
colorable "github.com/mattn/go-colorable"
cli "gopkg.in/urfave/cli.v1"
)
var (
endpoints []string
includeLocalhost bool
cluster string
scheme string
filesize int
from int
to int
)
func main() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
app := cli.NewApp()
app.Name = "smoke-test"
app.Usage = ""
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "cluster-endpoint",
Value: "testing",
Usage: "cluster to point to (open, or testing)",
Destination: &cluster,
},
cli.IntFlag{
Name: "cluster-from",
Value: 8501,
Usage: "swarm node (from)",
Destination: &from,
},
cli.IntFlag{
Name: "cluster-to",
Value: 8512,
Usage: "swarm node (to)",
Destination: &to,
},
cli.StringFlag{
Name: "cluster-scheme",
Value: "http",
Usage: "http or https",
Destination: &scheme,
},
cli.BoolFlag{
Name: "include-localhost",
Usage: "whether to include localhost:8500 as an endpoint",
Destination: &includeLocalhost,
},
cli.IntFlag{
Name: "filesize",
Value: 1,
Usage: "file size for generated random file in MB",
Destination: &filesize,
},
}
app.Commands = []cli.Command{
{
Name: "upload_and_sync",
Aliases: []string{"c"},
Usage: "upload and sync",
Action: cliUploadAndSync,
},
}
sort.Sort(cli.FlagsByName(app.Flags))
sort.Sort(cli.CommandsByName(app.Commands))
err := app.Run(os.Args)
if err != nil {
log.Error(err.Error())
}
}
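Assuming the binary is built from cmd/swarm/swarm-smoke, a sample run against the testing cluster might be (global flags precede the subcommand):

    swarm-smoke --cluster-endpoint testing \
                --cluster-from 8501 --cluster-to 8512 \
                --cluster-scheme http --filesize 1 \
                upload_and_sync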

@ -0,0 +1,184 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"bytes"
"crypto/md5"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
func generateEndpoints(scheme string, cluster string, from int, to int) {
for port := from; port <= to; port++ {
endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster))
}
if includeLocalhost {
endpoints = append(endpoints, "http://localhost:8500")
}
}
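// For example, scheme=http, cluster=testing, from=8501, to=8503 would
// generate (per the format string above):
//   http://8501.testing.swarm-gateways.net
//   http://8502.testing.swarm-gateways.net
//   http://8503.testing.swarm-gateways.net
// plus http://localhost:8500 when --include-localhost is set.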
func cliUploadAndSync(c *cli.Context) error {
defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now())
generateEndpoints(scheme, cluster, from, to)
log.Info("uploading to " + endpoints[0] + " and syncing")
f, cleanup := generateRandomFile(filesize * 1000000)
defer cleanup()
hash, err := upload(f, endpoints[0])
if err != nil {
log.Error(err.Error())
return err
}
fhash, err := digest(f)
if err != nil {
log.Error(err.Error())
return err
}
log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))
if filesize < 10 {
time.Sleep(15 * time.Second)
} else {
time.Sleep(2 * time.Duration(filesize) * time.Second)
}
wg := sync.WaitGroup{}
for _, endpoint := range endpoints {
endpoint := endpoint
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
err := fetch(hash, endpoint, fhash, ruid)
if err != nil {
continue
}
wg.Done()
return
}
}(endpoint, ruid)
}
wg.Wait()
log.Info("all endpoints synced random file successfully")
return nil
}
// fetch gets the requested `hash` from `endpoint` and compares it with the `original` file
func fetch(hash string, endpoint string, original []byte, ruid string) error {
log.Trace("sleeping", "ruid", ruid)
time.Sleep(1 * time.Second)
log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
res, err := http.Get(endpoint + "/bzz:/" + hash + "/")
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
return err
}
log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
if res.StatusCode != 200 {
err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
log.Warn(err.Error(), "ruid", ruid)
return err
}
defer res.Body.Close()
rdigest, err := digest(res.Body)
if err != nil {
log.Warn(err.Error(), "ruid", ruid)
return err
}
if !bytes.Equal(rdigest, original) {
err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
log.Warn(err.Error(), "ruid", ruid)
return err
}
log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
return nil
}
// upload uploads file `f` to `endpoint` via the `swarm up` command
func upload(f *os.File, endpoint string) (string, error) {
var out bytes.Buffer
cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name())
cmd.Stdout = &out
err := cmd.Run()
if err != nil {
return "", err
}
hash := strings.TrimRight(out.String(), "\r\n")
return hash, nil
}
func digest(r io.Reader) ([]byte, error) {
h := md5.New()
_, err := io.Copy(h, r)
if err != nil {
return nil, err
}
return h.Sum(nil), nil
}
// generateRandomFile creates a temporary file of the requested byte size
func generateRandomFile(size int) (f *os.File, teardown func()) {
// create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test")
if err != nil {
panic(err)
}
// callback for tmp file cleanup
teardown = func() {
tmp.Close()
os.Remove(tmp.Name())
}
buf := make([]byte, size)
_, err = rand.Read(buf)
if err != nil {
panic(err)
}
if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil {
panic(err)
}
return tmp, teardown
}

@ -40,12 +40,13 @@ func upload(ctx *cli.Context) {
args := ctx.Args() args := ctx.Args()
var ( var (
bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/")
recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name) recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name)
wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name)
defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name)
fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name)
mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) mimeType = ctx.GlobalString(SwarmUploadMimeType.Name)
client = swarm.NewClient(bzzapi) client = swarm.NewClient(bzzapi)
toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name)
file string file string
) )
@ -76,7 +77,7 @@ func upload(ctx *cli.Context) {
utils.Fatalf("Error opening file: %s", err) utils.Fatalf("Error opening file: %s", err)
} }
defer f.Close() defer f.Close()
hash, err := client.UploadRaw(f, f.Size) hash, err := client.UploadRaw(f, f.Size, toEncrypt)
if err != nil { if err != nil {
utils.Fatalf("Upload failed: %s", err) utils.Fatalf("Upload failed: %s", err)
} }
@ -97,7 +98,7 @@ func upload(ctx *cli.Context) {
if !recursive { if !recursive {
return "", errors.New("Argument is a directory and recursive upload is disabled") return "", errors.New("Argument is a directory and recursive upload is disabled")
} }
return client.UploadDirectory(file, defaultPath, "") return client.UploadDirectory(file, defaultPath, "", toEncrypt)
} }
} else { } else {
doUpload = func() (string, error) { doUpload = func() (string, error) {
@ -110,7 +111,7 @@ func upload(ctx *cli.Context) {
mimeType = detectMimeType(file) mimeType = detectMimeType(file)
} }
f.ContentType = mimeType f.ContentType = mimeType
return client.Upload(f, "") return client.Upload(f, "", toEncrypt)
} }
} }
hash, err := doUpload() hash, err := doUpload()

@ -17,60 +17,259 @@
package main package main
import ( import (
"bytes"
"flag"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"os" "os"
"path"
"path/filepath"
"strings"
"testing" "testing"
"time"
"github.com/ethereum/go-ethereum/log"
swarm "github.com/ethereum/go-ethereum/swarm/api/client"
colorable "github.com/mattn/go-colorable"
) )
var loglevel = flag.Int("loglevel", 3, "verbosity of logs")
func init() {
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
// TestCLISwarmUp tests that running 'swarm up' makes the resulting file // TestCLISwarmUp tests that running 'swarm up' makes the resulting file
// available from all nodes via the HTTP API // available from all nodes via the HTTP API
func TestCLISwarmUp(t *testing.T) { func TestCLISwarmUp(t *testing.T) {
// start 3 node cluster testCLISwarmUp(false, t)
t.Log("starting 3 node cluster") }
func TestCLISwarmUpRecursive(t *testing.T) {
testCLISwarmUpRecursive(false, t)
}
// TestCLISwarmUpEncrypted tests that running 'swarm encrypted-up' makes the resulting file
// available from all nodes via the HTTP API
func TestCLISwarmUpEncrypted(t *testing.T) {
testCLISwarmUp(true, t)
}
func TestCLISwarmUpEncryptedRecursive(t *testing.T) {
testCLISwarmUpRecursive(true, t)
}
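A hedged CLI equivalent of the encrypted variants; the gateway URL and file name are placeholders. Note that encrypted uploads return a 128-hex-character reference instead of the usual 64, which is exactly what the regexps below assert:

    swarm --bzzapi http://127.0.0.1:8500 up --encrypt secret.txt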
func testCLISwarmUp(toEncrypt bool, t *testing.T) {
log.Info("starting 3 node cluster")
cluster := newTestCluster(t, 3) cluster := newTestCluster(t, 3)
defer cluster.Shutdown() defer cluster.Shutdown()
// create a tmp file // create a tmp file
tmp, err := ioutil.TempFile("", "swarm-test") tmp, err := ioutil.TempFile("", "swarm-test")
assertNil(t, err) if err != nil {
t.Fatal(err)
}
defer tmp.Close() defer tmp.Close()
defer os.Remove(tmp.Name()) defer os.Remove(tmp.Name())
_, err = io.WriteString(tmp, "data")
assertNil(t, err)
// write data to file
data := "notsorandomdata"
_, err = io.WriteString(tmp, data)
if err != nil {
t.Fatal(err)
}
hashRegexp := `[a-f\d]{64}`
flags := []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
tmp.Name()}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"up",
"--encrypt",
tmp.Name()}
}
// upload the file with 'swarm up' and expect a hash // upload the file with 'swarm up' and expect a hash
t.Log("uploading file with 'swarm up'") log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name()) up := runSwarm(t, flags...)
_, matches := up.ExpectRegexp(`[a-f\d]{64}`) _, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit() up.ExpectExit()
hash := matches[0] hash := matches[0]
t.Logf("file uploaded with hash %s", hash) log.Info("file uploaded", "hash", hash)
// get the file from the HTTP API of each node // get the file from the HTTP API of each node
for _, node := range cluster.Nodes { for _, node := range cluster.Nodes {
t.Logf("getting file from %s", node.Name) log.Info("getting file from node", "node", node.Name)
res, err := http.Get(node.URL + "/bzz:/" + hash) res, err := http.Get(node.URL + "/bzz:/" + hash)
assertNil(t, err) if err != nil {
assertHTTPResponse(t, res, http.StatusOK, "data") t.Fatal(err)
}
defer res.Body.Close()
reply, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("expected HTTP status 200, got %s", res.Status)
}
if string(reply) != data {
t.Fatalf("expected HTTP body %q, got %q", data, reply)
}
log.Debug("verifying uploaded file using `swarm down`")
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
tmpDownload = path.Join(tmpDownload, "tmpfile.tmp")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)
bzzLocator := "bzz:/" + hash
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
bzzLocator,
tmpDownload,
}
down := runSwarm(t, flags...)
down.ExpectExit()
fi, err := os.Stat(tmpDownload)
if err != nil {
t.Fatalf("could not stat path: %v", err)
}
switch mode := fi.Mode(); {
case mode.IsRegular():
downloadedBytes, err := ioutil.ReadFile(tmpDownload)
if err != nil {
t.Fatalf("had an error reading the downloaded file: %v", err)
}
if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}
default:
t.Fatalf("expected to download regular file, got %s", fi.Mode())
}
}
timeout := 2 * time.Second
httpClient := http.Client{
Timeout: timeout,
}
// try to squeeze a timeout by getting a non-existent hash from each node
for _, node := range cluster.Nodes {
_, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340")
// we're speeding up the timeout here since netstore has a 60 seconds timeout on a request
if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") {
t.Fatal(err)
}
// this is disabled since it takes 60s due to netstore timeout
// if res.StatusCode != 404 {
// t.Fatalf("expected HTTP status 404, got %s", res.Status)
// }
} }
} }
func assertNil(t *testing.T, err error) { func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) {
fmt.Println("starting 3 node cluster")
cluster := newTestCluster(t, 3)
defer cluster.Shutdown()
tmpUploadDir, err := ioutil.TempDir("", "swarm-test")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
} defer os.RemoveAll(tmpUploadDir)
// create tmp files
data := "notsorandomdata"
for _, path := range []string{"tmp1", "tmp2"} {
if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil {
t.Fatal(err)
}
}
func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) { hashRegexp := `[a-f\d]{64}`
defer res.Body.Close() flags := []string{
if res.StatusCode != expectedStatus { "--bzzapi", cluster.Nodes[0].URL,
t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status) "--recursive",
"up",
tmpUploadDir}
if toEncrypt {
hashRegexp = `[a-f\d]{128}`
flags = []string{
"--bzzapi", cluster.Nodes[0].URL,
"--recursive",
"up",
"--encrypt",
tmpUploadDir}
} }
data, err := ioutil.ReadAll(res.Body) // upload the file with 'swarm up' and expect a hash
assertNil(t, err) log.Info(fmt.Sprintf("uploading file with 'swarm up'"))
if string(data) != expectedBody { up := runSwarm(t, flags...)
t.Fatalf("expected HTTP body %q, got %q", expectedBody, data) _, matches := up.ExpectRegexp(hashRegexp)
up.ExpectExit()
hash := matches[0]
log.Info("dir uploaded", "hash", hash)
// get the file from the HTTP API of each node
for _, node := range cluster.Nodes {
log.Info("getting file from node", "node", node.Name)
//try to get the content with `swarm down`
tmpDownload, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDownload)
bzzLocator := "bzz:/" + hash
flagss := []string{}
flagss = []string{
"--bzzapi", cluster.Nodes[0].URL,
"down",
"--recursive",
bzzLocator,
tmpDownload,
}
fmt.Println("downloading from swarm with recursive")
down := runSwarm(t, flagss...)
down.ExpectExit()
files, err := ioutil.ReadDir(tmpDownload)
for _, v := range files {
fi, err := os.Stat(path.Join(tmpDownload, v.Name()))
if err != nil {
t.Fatalf("got an error: %v", err)
}
switch mode := fi.Mode(); {
case mode.IsRegular():
if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil {
t.Fatalf("encountered an error opening the file returned from the CLI: %v", err)
} else {
ff := make([]byte, len(data))
io.ReadFull(file, ff)
buf := bytes.NewBufferString(data)
if !bytes.Equal(ff, buf.Bytes()) {
t.Fatalf("retrieved data and posted data not equal!")
}
}
default:
t.Fatalf("this shouldnt happen")
}
}
if err != nil {
t.Fatalf("could not list files at: %v", files)
}
} }
} }
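A note on the non-existent-hash check above: the test leans on a client-side timeout rather than waiting for a 404, because netstore keeps an unresolvable retrieval open for up to 60 seconds. The same pattern in isolation, as a minimal runnable sketch (the gateway URL and hash are placeholders):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// A client-side deadline bounds how long we wait for response headers,
	// regardless of how long the server keeps the request open.
	client := http.Client{Timeout: 2 * time.Second}
	_, err := client.Get("http://127.0.0.1:8500/bzz:/0000000000000000000000000000000000000000000000000000000000000000")
	if err != nil {
		// For an unretrievable hash, a timeout error here is the expected outcome.
		fmt.Println("request ended:", err)
	}
}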

@ -31,10 +31,10 @@ var (
egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil) egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil)
) )
// meteredConn is a wrapper around a network TCP connection that meters both the // meteredConn is a wrapper around a net.Conn that meters both the
// inbound and outbound network traffic. // inbound and outbound network traffic.
type meteredConn struct { type meteredConn struct {
*net.TCPConn // Network connection to wrap with metering net.Conn // Network connection to wrap with metering
} }
// newMeteredConn creates a new metered connection, also bumping the ingress or // newMeteredConn creates a new metered connection, also bumping the ingress or
@ -51,13 +51,13 @@ func newMeteredConn(conn net.Conn, ingress bool) net.Conn {
} else { } else {
egressConnectMeter.Mark(1) egressConnectMeter.Mark(1)
} }
return &meteredConn{conn.(*net.TCPConn)} return &meteredConn{Conn: conn}
} }
// Read delegates a network read to the underlying connection, bumping the ingress // Read delegates a network read to the underlying connection, bumping the ingress
// traffic meter along the way. // traffic meter along the way.
func (c *meteredConn) Read(b []byte) (n int, err error) { func (c *meteredConn) Read(b []byte) (n int, err error) {
n, err = c.TCPConn.Read(b) n, err = c.Conn.Read(b)
ingressTrafficMeter.Mark(int64(n)) ingressTrafficMeter.Mark(int64(n))
return return
} }
@ -65,7 +65,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
// Write delegates a network write to the underlying connection, bumping the // Write delegates a network write to the underlying connection, bumping the
// egress traffic meter along the way. // egress traffic meter along the way.
func (c *meteredConn) Write(b []byte) (n int, err error) { func (c *meteredConn) Write(b []byte) (n int, err error) {
n, err = c.TCPConn.Write(b) n, err = c.Conn.Write(b)
egressTrafficMeter.Mark(int64(n)) egressTrafficMeter.Mark(int64(n))
return return
} }

p2p/peer.go

@@ -17,6 +17,7 @@
 package p2p

 import (
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -31,6 +32,10 @@ import (
 	"github.com/ethereum/go-ethereum/rlp"
 )

+var (
+	ErrShuttingDown = errors.New("shutting down")
+)
+
 const (
 	baseProtocolVersion = 5
 	baseProtocolLength  = uint64(16)
@@ -393,7 +398,7 @@ func (rw *protoRW) WriteMsg(msg Msg) (err error) {
 		// as well but we don't want to rely on that.
 		rw.werr <- err
 	case <-rw.closed:
-		err = fmt.Errorf("shutting down")
+		err = ErrShuttingDown
 	}
 	return err
 }
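Replacing the ad-hoc fmt.Errorf with the exported ErrShuttingDown sentinel lets callers distinguish an orderly shutdown from a real write failure by comparison instead of string matching. A self-contained illustration of the pattern (not go-ethereum code; errors.Is needs Go 1.13+, while code of this era would compare with ==):

package main

import (
	"errors"
	"fmt"
)

var ErrShuttingDown = errors.New("shutting down")

func send() error { return ErrShuttingDown }

func main() {
	if err := send(); err != nil {
		if errors.Is(err, ErrShuttingDown) {
			fmt.Println("peer is going away; not a protocol failure")
			return
		}
		fmt.Println("unexpected error:", err)
	}
}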

p2p/protocols/protocol.go

@@ -31,10 +31,12 @@
 import (
 	"context"
 	"fmt"
+	"io"
 	"reflect"
 	"sync"
 	"time"

+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p"
 )
@@ -202,6 +204,11 @@ func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer {
 func (p *Peer) Run(handler func(msg interface{}) error) error {
 	for {
 		if err := p.handleIncoming(handler); err != nil {
+			if err != io.EOF {
+				metrics.GetOrRegisterCounter("peer.handleincoming.error", nil).Inc(1)
+				log.Error("peer.handleIncoming", "err", err)
+			}
 			return err
 		}
 	}
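The new guard only counts and logs errors other than io.EOF, since EOF is how a clean peer disconnect surfaces from the read loop. The predicate in isolation, as a runnable sketch (countableError is an illustrative name, not part of the package):

package main

import (
	"errors"
	"fmt"
	"io"
)

// countableError reports whether an error from a read loop should be
// recorded as a failure; io.EOF is the normal end of a peer session.
func countableError(err error) bool {
	return err != nil && !errors.Is(err, io.EOF)
}

func main() {
	fmt.Println(countableError(io.EOF))                // false: clean disconnect
	fmt.Println(countableError(errors.New("bad msg"))) // true: worth a metric and a log line
}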

p2p/simulations/network.go

@@ -31,7 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
 )

-var dialBanTimeout = 200 * time.Millisecond
+var DialBanTimeout = 200 * time.Millisecond

 // NetworkConfig defines configuration options for starting a Network
 type NetworkConfig struct {
@@ -78,41 +78,25 @@ func (net *Network) Events() *event.Feed {
 	return &net.events
 }

-// NewNode adds a new node to the network with a random ID
-func (net *Network) NewNode() (*Node, error) {
-	conf := adapters.RandomNodeConfig()
-	conf.Services = []string{net.DefaultService}
-	return net.NewNodeWithConfig(conf)
-}
-
 // NewNodeWithConfig adds a new node to the network with the given config,
 // returning an error if a node with the same ID or name already exists
 func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) {
 	net.lock.Lock()
 	defer net.lock.Unlock()

-	// create a random ID and PrivateKey if not set
-	if conf.ID == (discover.NodeID{}) {
-		c := adapters.RandomNodeConfig()
-		conf.ID = c.ID
-		conf.PrivateKey = c.PrivateKey
-	}
-	id := conf.ID
 	if conf.Reachable == nil {
 		conf.Reachable = func(otherID discover.NodeID) bool {
 			_, err := net.InitConn(conf.ID, otherID)
-			return err == nil
+			if err != nil && bytes.Compare(conf.ID.Bytes(), otherID.Bytes()) < 0 {
+				return false
+			}
+			return true
 		}
 	}

-	// assign a name to the node if not set
-	if conf.Name == "" {
-		conf.Name = fmt.Sprintf("node%02d", len(net.Nodes)+1)
-	}
-
 	// check the node doesn't already exist
-	if node := net.getNode(id); node != nil {
-		return nil, fmt.Errorf("node with ID %q already exists", id)
+	if node := net.getNode(conf.ID); node != nil {
+		return nil, fmt.Errorf("node with ID %q already exists", conf.ID)
 	}
 	if node := net.getNodeByName(conf.Name); node != nil {
 		return nil, fmt.Errorf("node with name %q already exists", conf.Name)
@@ -132,8 +116,8 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error)
 		Node:   adapterNode,
 		Config: conf,
 	}
-	log.Trace(fmt.Sprintf("node %v created", id))
-	net.nodeMap[id] = len(net.Nodes)
+	log.Trace(fmt.Sprintf("node %v created", conf.ID))
+	net.nodeMap[conf.ID] = len(net.Nodes)
 	net.Nodes = append(net.Nodes, node)

 	// emit a "control" event
@@ -181,7 +165,9 @@ func (net *Network) Start(id discover.NodeID) error {
 // startWithSnapshots starts the node with the given ID using the given
 // snapshots
 func (net *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error {
-	node := net.GetNode(id)
+	net.lock.Lock()
+	defer net.lock.Unlock()
+
+	node := net.getNode(id)
 	if node == nil {
 		return fmt.Errorf("node %v does not exist", id)
 	}
@@ -220,9 +206,13 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve
 		// assume the node is now down
 		net.lock.Lock()
+		defer net.lock.Unlock()
+
 		node := net.getNode(id)
+		if node == nil {
+			log.Error("Can not find node for id", "id", id)
+			return
+		}
 		node.Up = false
-		net.lock.Unlock()
 		net.events.Send(NewEvent(node))
 	}()
 	for {
@@ -259,7 +249,9 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve
 // Stop stops the node with the given ID
 func (net *Network) Stop(id discover.NodeID) error {
-	node := net.GetNode(id)
+	net.lock.Lock()
+	defer net.lock.Unlock()
+
+	node := net.getNode(id)
 	if node == nil {
 		return fmt.Errorf("node %v does not exist", id)
 	}
@@ -312,7 +304,9 @@ func (net *Network) Disconnect(oneID, otherID discover.NodeID) error {
 // DidConnect tracks the fact that the "one" node connected to the "other" node
 func (net *Network) DidConnect(one, other discover.NodeID) error {
-	conn, err := net.GetOrCreateConn(one, other)
+	net.lock.Lock()
+	defer net.lock.Unlock()
+
+	conn, err := net.getOrCreateConn(one, other)
 	if err != nil {
 		return fmt.Errorf("connection between %v and %v does not exist", one, other)
 	}
@@ -327,7 +321,9 @@ func (net *Network) DidConnect(one, other discover.NodeID) error {
 // DidDisconnect tracks the fact that the "one" node disconnected from the
 // "other" node
 func (net *Network) DidDisconnect(one, other discover.NodeID) error {
-	conn := net.GetConn(one, other)
+	net.lock.Lock()
+	defer net.lock.Unlock()
+
+	conn := net.getConn(one, other)
 	if conn == nil {
 		return fmt.Errorf("connection between %v and %v does not exist", one, other)
 	}
@@ -335,7 +331,7 @@ func (net *Network) DidDisconnect(one, other discover.NodeID) error {
 		return fmt.Errorf("%v and %v already disconnected", one, other)
 	}
 	conn.Up = false
-	conn.initiated = time.Now().Add(-dialBanTimeout)
+	conn.initiated = time.Now().Add(-DialBanTimeout)
 	net.events.Send(NewEvent(conn))
 	return nil
 }
@@ -476,16 +472,19 @@ func (net *Network) InitConn(oneID, otherID discover.NodeID) (*Conn, error) {
 	if err != nil {
 		return nil, err
 	}
-	if time.Since(conn.initiated) < dialBanTimeout {
-		return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
-	}
 	if conn.Up {
 		return nil, fmt.Errorf("%v and %v already connected", oneID, otherID)
 	}
+	if time.Since(conn.initiated) < DialBanTimeout {
+		return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID)
+	}
 	err = conn.nodesUp()
 	if err != nil {
+		log.Trace(fmt.Sprintf("nodes not up: %v", err))
 		return nil, fmt.Errorf("nodes not up: %v", err)
 	}
+	log.Debug("InitConn - connection initiated")
 	conn.initiated = time.Now()
 	return conn, nil
 }
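The rewritten Reachable callback deserves a note: when InitConn fails (for example, a recently attempted dial), only the node with the numerically lower ID reports the peer unreachable, so exactly one side of each pair backs off and a simultaneous-dial standoff is avoided. A sketch of the tie-break with byte slices standing in for discover.NodeID (the function name is illustrative, not from this change):

package main

import (
	"bytes"
	"fmt"
)

// lowerIDDefers mimics the tie-break in the rewritten Reachable check:
// on a failed connection attempt, only the node with the lower ID treats
// the peer as unreachable, so the pair cannot both refuse to dial.
func lowerIDDefers(self, other []byte, initErr error) bool {
	if initErr != nil && bytes.Compare(self, other) < 0 {
		return false // lower ID backs off for now
	}
	return true
}

func main() {
	a, b := []byte{0x01}, []byte{0x02}
	err := fmt.Errorf("connection recently attempted")
	fmt.Println(lowerIDDefers(a, b, err)) // false: lower ID defers
	fmt.Println(lowerIDDefers(b, a, err)) // true: higher ID proceeds
}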

p2p/testing/protocolsession.go

@@ -91,7 +91,9 @@ func (s *ProtocolSession) trigger(trig Trigger) error {
 	errc := make(chan error)

 	go func() {
+		log.Trace(fmt.Sprintf("trigger %v (%v)....", trig.Msg, trig.Code))
 		errc <- mockNode.Trigger(&trig)
+		log.Trace(fmt.Sprintf("triggered %v (%v)", trig.Msg, trig.Code))
 	}()

 	t := trig.Timeout

swarm/AUTHORS

@@ -0,0 +1,35 @@
# Core team members
Viktor Trón - @zelig
Louis Holbrook - @nolash
Lewis Marshall - @lmars
Anton Evangelatov - @nonsense
Janoš Guljaš - @janos
Balint Gabor - @gbalint
Elad Nachmias - @justelad
Daniel A. Nagy - @nagydani
Aron Fischer - @homotopycolimit
Fabio Barone - @holisticode
Zahoor Mohamed - @jmozah
Zsolt Felföldi - @zsfelfoldi
# External contributors
Kiel Barry
Gary Rong
Jared Wasinger
Leon Stanko
Javier Peletier [epiclabs.io]
Bartek Borkowski [tungsten-labs.com]
Shane Howley [mainframe.com]
Doug Leonard [mainframe.com]
Ivan Daniluk [status.im]
Felix Lange [EF]
Martin Holst Swende [EF]
Guillaume Ballet [EF]
ligi [EF]
Christopher Dro [blick-labs.com]
Sergii Bomko [ledgerleopard.com]
Domino Valdano
Rafael Matias
Coogan Brennan

swarm/OWNERS

@@ -0,0 +1,26 @@
# Ownership by go packages
swarm
├── api ─────────────────── ethersphere
├── bmt ─────────────────── @zelig
├── dev ─────────────────── @lmars
├── fuse ────────────────── @jmozah, @holisticode
├── grafana_dashboards ──── @nonsense
├── metrics ─────────────── @nonsense, @holisticode
├── multihash ───────────── @nolash
├── network ─────────────── ethersphere
│ ├── bitvector ───────── @zelig, @janos, @gbalint
│ ├── priorityqueue ───── @zelig, @janos, @gbalint
│ ├── simulations ─────── @zelig
│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad
│ ├── intervals ───── @janos
│ └── testing ─────── @zelig
├── pot ─────────────────── @zelig
├── pss ─────────────────── @nolash, @zelig, @nonsense
├── services ────────────── @zelig
├── state ───────────────── @justelad
├── storage ─────────────── ethersphere
│ ├── encryption ──────── @gbalint, @zelig, @nagydani
│ ├── mock ────────────── @janos
│ └── mru ─────────────── @nolash
└── testutil ────────────── @lmars

swarm/api/api.go

@@ -17,13 +17,13 @@
 package api

 import (
+	"context"
 	"fmt"
 	"io"
+	"math/big"
 	"net/http"
 	"path"
-	"regexp"
 	"strings"
-	"sync"

 	"bytes"
 	"mime"
@@ -31,14 +31,15 @@ import (
 	"time"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/contracts/ens"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/multihash"
 	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/storage/mru"
 )

-var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}")
-
-//setup metrics
 var (
 	apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
 	apiResolveFail  = metrics.NewRegisteredCounter("api.resolve.fail", nil)
@@ -46,7 +47,7 @@ var (
 	apiPutFail         = metrics.NewRegisteredCounter("api.put.fail", nil)
 	apiGetCount        = metrics.NewRegisteredCounter("api.get.count", nil)
 	apiGetNotFound     = metrics.NewRegisteredCounter("api.get.notfound", nil)
-	apiGetHttp300      = metrics.NewRegisteredCounter("api.get.http.300", nil)
+	apiGetHTTP300      = metrics.NewRegisteredCounter("api.get.http.300", nil)
 	apiModifyCount     = metrics.NewRegisteredCounter("api.modify.count", nil)
 	apiModifyFail      = metrics.NewRegisteredCounter("api.modify.fail", nil)
 	apiAddFileCount    = metrics.NewRegisteredCounter("api.addfile.count", nil)
@@ -55,22 +56,33 @@ var (
 	apiRmFileFail      = metrics.NewRegisteredCounter("api.removefile.fail", nil)
 	apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
 	apiAppendFileFail  = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
+	apiGetInvalid      = metrics.NewRegisteredCounter("api.get.invalid", nil)
 )

+// Resolver interface resolves a domain name to a hash using ENS
 type Resolver interface {
 	Resolve(string) (common.Hash, error)
 }

+// ResolveValidator is used to validate the contained Resolver
+type ResolveValidator interface {
+	Resolver
+	Owner(node [32]byte) (common.Address, error)
+	HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
+}
+
 // NoResolverError is returned by MultiResolver.Resolve if no resolver
 // can be found for the address.
 type NoResolverError struct {
 	TLD string
 }

+// NewNoResolverError creates a NoResolverError for the given top level domain
 func NewNoResolverError(tld string) *NoResolverError {
 	return &NoResolverError{TLD: tld}
 }

+// Error implements the error interface for NoResolverError
 func (e *NoResolverError) Error() string {
 	if e.TLD == "" {
 		return "no ENS resolver"
@@ -82,7 +94,8 @@ func (e *NoResolverError) Error() string {
 // Each TLD can have multiple resolvers, and the resolution from the
 // first one in the sequence will be returned.
 type MultiResolver struct {
-	resolvers map[string][]Resolver
+	resolvers map[string][]ResolveValidator
+	nameHash  func(string) common.Hash
 }

 // MultiResolverOption sets options for MultiResolver and is used as
@@ -93,16 +106,24 @@ type MultiResolverOption func(*MultiResolver)
 // for a specific TLD. If TLD is an empty string, the resolver will be added
 // to the list of default resolvers, the ones that will be used for resolution
 // of addresses which do not have their TLD resolver specified.
-func MultiResolverOptionWithResolver(r Resolver, tld string) MultiResolverOption {
+func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption {
 	return func(m *MultiResolver) {
 		m.resolvers[tld] = append(m.resolvers[tld], r)
 	}
 }

+// MultiResolverOptionWithNameHash is unused at the time of this writing
+func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption {
+	return func(m *MultiResolver) {
+		m.nameHash = nameHash
+	}
+}
+
 // NewMultiResolver creates a new instance of MultiResolver.
 func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
 	m = &MultiResolver{
-		resolvers: make(map[string][]Resolver),
+		resolvers: make(map[string][]ResolveValidator),
+		nameHash:  ens.EnsNode,
 	}
 	for _, o := range opts {
 		o(m)
@@ -114,18 +135,10 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
 // If there are more default Resolvers, or for a specific TLD,
 // the Hash from the first one which does not return an error
 // will be returned.
-func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) {
-	rs := m.resolvers[""]
-	tld := path.Ext(addr)
-	if tld != "" {
-		tld = tld[1:]
-		rstld, ok := m.resolvers[tld]
-		if ok {
-			rs = rstld
-		}
-	}
-	if rs == nil {
-		return h, NewNoResolverError(tld)
+func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
+	rs, err := m.getResolveValidator(addr)
+	if err != nil {
+		return h, err
 	}
 	for _, r := range rs {
 		h, err = r.Resolve(addr)
@@ -136,104 +149,171 @@ func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) {
 	return
 }

+// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address
+func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) {
+	rs, err := m.getResolveValidator(name)
+	if err != nil {
+		return false, err
+	}
+	var addr common.Address
+	for _, r := range rs {
+		addr, err = r.Owner(m.nameHash(name))
+		// we hide the error if it is not for the last resolver we check
+		if err == nil {
+			return addr == address, nil
+		}
+	}
+	return false, err
+}
+
+// HeaderByNumber uses the validator of the given domain name and retrieves the header for the given block number
+func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) {
+	rs, err := m.getResolveValidator(name)
+	if err != nil {
+		return nil, err
+	}
+	for _, r := range rs {
+		var header *types.Header
+		header, err = r.HeaderByNumber(ctx, blockNr)
+		// we hide the error if it is not for the last resolver we check
+		if err == nil {
+			return header, nil
+		}
+	}
+	return nil, err
+}
+
+// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
+func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
+	rs := m.resolvers[""]
+	tld := path.Ext(name)
+	if tld != "" {
+		tld = tld[1:]
+		rstld, ok := m.resolvers[tld]
+		if ok {
+			return rstld, nil
+		}
+	}
+	if len(rs) == 0 {
+		return rs, NewNoResolverError(tld)
+	}
+	return rs, nil
+}
+
+// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses
+func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) {
+	m.nameHash = nameHash
+}
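getResolveValidator keys the resolver table by top-level domain, extracted with path.Ext and stripped of its leading dot; names without a dot fall back to the default ("") bucket. The extraction rule in isolation, as a runnable sketch (the helper name is illustrative):

package main

import (
	"fmt"
	"path"
)

// tld mirrors how getResolveValidator picks the resolver bucket:
// path.Ext("swarm.test") yields ".test", and the leading dot is stripped.
func tld(name string) string {
	if ext := path.Ext(name); ext != "" {
		return ext[1:]
	}
	return "" // no dot: falls back to the default resolver list
}

func main() {
	fmt.Println(tld("swarm.eth"))  // "eth"
	fmt.Println(tld("swarm.test")) // "test"
	fmt.Println(tld("localhost"))  // "" -> default resolvers
}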
 /*
-Api implements webserver/file system related content storage and retrieval
-on top of the dpa
-it is the public interface of the dpa which is included in the ethereum stack
+API implements webserver/file system related content storage and retrieval
+on top of the FileStore
+it is the public interface of the FileStore which is included in the ethereum stack
 */
-type Api struct {
-	dpa *storage.DPA
-	dns Resolver
+type API struct {
+	resource  *mru.Handler
+	fileStore *storage.FileStore
+	dns       Resolver
 }

-//the api constructor initialises
-func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) {
-	self = &Api{
-		dpa: dpa,
-		dns: dns,
+// NewAPI is the API constructor and initialises a new API instance.
+func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler) (self *API) {
+	self = &API{
+		fileStore: fileStore,
+		dns:       dns,
+		resource:  resourceHandler,
 	}
 	return
 }

-// to be used only in TEST
-func (self *Api) Upload(uploadDir, index string) (hash string, err error) {
-	fs := NewFileSystem(self)
-	hash, err = fs.Upload(uploadDir, index)
+// Upload to be used only in TEST
+func (a *API) Upload(uploadDir, index string, toEncrypt bool) (hash string, err error) {
+	fs := NewFileSystem(a)
+	hash, err = fs.Upload(uploadDir, index, toEncrypt)
 	return hash, err
 }

-// DPA reader API
-func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader {
-	return self.dpa.Retrieve(key)
+// Retrieve FileStore reader API
+func (a *API) Retrieve(addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
+	return a.fileStore.Retrieve(addr)
 }

-func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) {
-	return self.dpa.Store(data, size, wg, nil)
+// Store wraps the Store API call of the embedded FileStore
+func (a *API) Store(data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(), err error) {
+	log.Debug("api.store", "size", size)
+	return a.fileStore.Store(data, size, toEncrypt)
 }

+// ErrResolve is returned when a URI cannot be resolved from ENS.
 type ErrResolve error

-// DNS Resolver
-func (self *Api) Resolve(uri *URI) (storage.Key, error) {
+// Resolve resolves a URI to an Address using the MultiResolver.
+func (a *API) Resolve(uri *URI) (storage.Address, error) {
 	apiResolveCount.Inc(1)
-	log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr))
+	log.Trace("resolving", "uri", uri.Addr)

-	// if the URI is immutable, check if the address is a hash
-	isHash := hashMatcher.MatchString(uri.Addr)
-	if uri.Immutable() || uri.DeprecatedImmutable() {
-		if !isHash {
+	// if the URI is immutable, check if the address looks like a hash
+	if uri.Immutable() {
+		key := uri.Address()
+		if key == nil {
 			return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr)
 		}
-		return common.Hex2Bytes(uri.Addr), nil
+		return key, nil
 	}

 	// if DNS is not configured, check if the address is a hash
-	if self.dns == nil {
-		if !isHash {
+	if a.dns == nil {
+		key := uri.Address()
+		if key == nil {
 			apiResolveFail.Inc(1)
 			return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr)
 		}
-		return common.Hex2Bytes(uri.Addr), nil
+		return key, nil
 	}

 	// try and resolve the address
-	resolved, err := self.dns.Resolve(uri.Addr)
+	resolved, err := a.dns.Resolve(uri.Addr)
 	if err == nil {
 		return resolved[:], nil
-	} else if !isHash {
+	}
+
+	key := uri.Address()
+	if key == nil {
 		apiResolveFail.Inc(1)
 		return nil, err
 	}
-	return common.Hex2Bytes(uri.Addr), nil
+	return key, nil
 }

-// Put provides singleton manifest creation on top of dpa store
-func (self *Api) Put(content, contentType string) (storage.Key, error) {
+// Put provides singleton manifest creation on top of FileStore store
+func (a *API) Put(content, contentType string, toEncrypt bool) (k storage.Address, wait func(), err error) {
 	apiPutCount.Inc(1)
 	r := strings.NewReader(content)
-	wg := &sync.WaitGroup{}
-	key, err := self.dpa.Store(r, int64(len(content)), wg, nil)
+	key, waitContent, err := a.fileStore.Store(r, int64(len(content)), toEncrypt)
 	if err != nil {
 		apiPutFail.Inc(1)
-		return nil, err
+		return nil, nil, err
 	}
 	manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
 	r = strings.NewReader(manifest)
-	key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil)
+	key, waitManifest, err := a.fileStore.Store(r, int64(len(manifest)), toEncrypt)
 	if err != nil {
 		apiPutFail.Inc(1)
-		return nil, err
+		return nil, nil, err
 	}
-	wg.Wait()
-	return key, nil
+	return key, func() {
+		waitContent()
+		waitManifest()
+	}, nil
 }
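Put now returns the manifest address together with a wait function that folds the two store operations into one callback, so callers can overlap work with chunk persistence and block only when they need durability. The composition pattern in isolation, as a runnable sketch (composeWaits is an illustrative name):

package main

import "fmt"

// composeWaits mirrors how Put folds the two FileStore wait functions
// into the single callback handed back to the caller.
func composeWaits(waits ...func()) func() {
	return func() {
		for _, w := range waits {
			w()
		}
	}
}

func main() {
	waitContent := func() { fmt.Println("content chunks stored") }
	waitManifest := func() { fmt.Println("manifest chunks stored") }
	wait := composeWaits(waitContent, waitManifest)
	// ... do other work while storage proceeds ...
	wait() // block until both phases have completed
}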
 // Get uses iterative manifest retrieval and prefix matching
-// to resolve basePath to content using dpa retrieve
-// it returns a section reader, mimeType, status and an error
-func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) {
+// to resolve basePath to content using FileStore retrieve
+// it returns a section reader, mimeType, status, the key of the actual content and an error
+func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
+	log.Debug("api.get", "key", manifestAddr, "path", path)
 	apiGetCount.Inc(1)
-	trie, err := loadManifest(self.dpa, key, nil)
+	trie, err := loadManifest(a.fileStore, manifestAddr, nil)
 	if err != nil {
 		apiGetNotFound.Inc(1)
 		status = http.StatusNotFound
@@ -241,34 +321,111 @@ func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
 		return
 	}

-	log.Trace(fmt.Sprintf("getEntry(%s)", path))
+	log.Debug("trie getting entry", "key", manifestAddr, "path", path)
 	entry, _ := trie.getEntry(path)
 	if entry != nil {
-		key = common.Hex2Bytes(entry.Hash)
+		log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash)
+		// we need to do some extra work if this is a mutable resource manifest
+		if entry.ContentType == ResourceContentType {
+			// get the resource root chunk key
+			log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash)
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+			rsrc, err := a.resource.Load(storage.Address(common.FromHex(entry.Hash)))
+			if err != nil {
+				apiGetNotFound.Inc(1)
+				status = http.StatusNotFound
+				log.Debug(fmt.Sprintf("get resource content error: %v", err))
+				return reader, mimeType, status, nil, err
+			}
+
+			// use this key to retrieve the latest update
+			rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{})
+			if err != nil {
+				apiGetNotFound.Inc(1)
+				status = http.StatusNotFound
+				log.Debug(fmt.Sprintf("get resource content error: %v", err))
+				return reader, mimeType, status, nil, err
+			}
+
+			// if it's multihash, we will transparently serve the content this multihash points to
+			// \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper
+			if rsrc.Multihash {
+				// get the data of the update
+				_, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex())
+				if err != nil {
+					apiGetNotFound.Inc(1)
+					status = http.StatusNotFound
+					log.Warn(fmt.Sprintf("get resource content error: %v", err))
+					return reader, mimeType, status, nil, err
+				}
+
+				// validate that data as multihash
+				decodedMultihash, err := multihash.FromMultihash(rsrcData)
+				if err != nil {
+					apiGetInvalid.Inc(1)
+					status = http.StatusUnprocessableEntity
+					log.Warn("invalid resource multihash", "err", err)
+					return reader, mimeType, status, nil, err
+				}
+				manifestAddr = storage.Address(decodedMultihash)
+				log.Trace("resource is multihash", "key", manifestAddr)
+
+				// get the manifest the multihash digest points to
+				trie, err := loadManifest(a.fileStore, manifestAddr, nil)
+				if err != nil {
+					apiGetNotFound.Inc(1)
+					status = http.StatusNotFound
+					log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err))
+					return reader, mimeType, status, nil, err
+				}
+
+				// finally, get the manifest entry
+				// it will always be the entry on path ""
+				entry, _ = trie.getEntry(path)
+				if entry == nil {
+					status = http.StatusNotFound
+					apiGetNotFound.Inc(1)
+					err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path)
+					log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path)
+					return reader, mimeType, status, nil, err
+				}
+			} else {
+				// data is returned verbatim since it's not a multihash
+				return rsrc, "application/octet-stream", http.StatusOK, nil, nil
+			}
+		}
+
+		// regardless of resource update manifests or normal manifests we will converge at this point
+		// get the key the manifest entry points to and serve it if it's unambiguous
+		contentAddr = common.Hex2Bytes(entry.Hash)
 		status = entry.Status
 		if status == http.StatusMultipleChoices {
-			apiGetHttp300.Inc(1)
-			return
-		} else {
-			mimeType = entry.ContentType
-			log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType))
-			reader = self.dpa.Retrieve(key)
+			apiGetHTTP300.Inc(1)
+			return nil, entry.ContentType, status, contentAddr, err
 		}
+		mimeType = entry.ContentType
+		log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType)
+		reader, _ = a.fileStore.Retrieve(contentAddr)
 	} else {
+		// no entry found
 		status = http.StatusNotFound
 		apiGetNotFound.Inc(1)
 		err = fmt.Errorf("manifest entry for '%s' not found", path)
-		log.Warn(fmt.Sprintf("%v", err))
+		log.Trace("manifest entry not found", "key", contentAddr, "path", path)
 	}
 	return
 }
-func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) (storage.Key, error) {
+// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
+func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
 	apiModifyCount.Inc(1)
 	quitC := make(chan bool)
-	trie, err := loadManifest(self.dpa, key, quitC)
+	trie, err := loadManifest(a.fileStore, addr, quitC)
 	if err != nil {
 		apiModifyFail.Inc(1)
 		return nil, err
@@ -288,10 +445,11 @@ func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
 		apiModifyFail.Inc(1)
 		return nil, err
 	}
-	return trie.hash, nil
+	return trie.ref, nil
 }

-func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) {
+// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
+func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
 	apiAddFileCount.Inc(1)

 	uri, err := Parse("bzz:/" + mhash)
@@ -299,7 +457,7 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
 		apiAddFileFail.Inc(1)
 		return nil, "", err
 	}
-	mkey, err := self.Resolve(uri)
+	mkey, err := a.Resolve(uri)
 	if err != nil {
 		apiAddFileFail.Inc(1)
 		return nil, "", err
@@ -318,7 +476,7 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
 		ModTime:     time.Now(),
 	}

-	mw, err := self.NewManifestWriter(mkey, nil)
+	mw, err := a.NewManifestWriter(mkey, nil)
 	if err != nil {
 		apiAddFileFail.Inc(1)
 		return nil, "", err
@@ -341,7 +499,8 @@ func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
 }

-func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
+// RemoveFile removes a file entry in a manifest.
+func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
 	apiRmFileCount.Inc(1)

 	uri, err := Parse("bzz:/" + mhash)
@@ -349,7 +508,7 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
 		apiRmFileFail.Inc(1)
 		return "", err
 	}
-	mkey, err := self.Resolve(uri)
+	mkey, err := a.Resolve(uri)
 	if err != nil {
 		apiRmFileFail.Inc(1)
 		return "", err
@@ -360,7 +519,7 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
 		path = path[1:]
 	}

-	mw, err := self.NewManifestWriter(mkey, nil)
+	mw, err := a.NewManifestWriter(mkey, nil)
 	if err != nil {
 		apiRmFileFail.Inc(1)
 		return "", err
@@ -382,7 +541,8 @@ func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) {
 	return newMkey.String(), nil
 }

-func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) {
+// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm.
+func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) {
 	apiAppendFileCount.Inc(1)

 	buffSize := offset + addSize
@@ -392,7 +552,7 @@ func (a *API) AppendFile(...) {
 	buf := make([]byte, buffSize)

-	oldReader := self.Retrieve(oldKey)
+	oldReader, _ := a.Retrieve(oldAddr)
 	io.ReadAtLeast(oldReader, buf, int(offset))

 	newReader := bytes.NewReader(content)
@@ -406,7 +566,7 @@ func (a *API) AppendFile(...) {
 	totalSize := int64(len(buf))

 	// TODO(jmozah): to append using pyramid chunker when it is ready
-	//oldReader := self.Retrieve(oldKey)
+	//oldReader := a.Retrieve(oldKey)
 	//newReader := bytes.NewReader(content)
 	//combinedReader := io.MultiReader(oldReader, newReader)

@@ -415,7 +575,7 @@ func (a *API) AppendFile(...) {
 		apiAppendFileFail.Inc(1)
 		return nil, "", err
 	}
-	mkey, err := self.Resolve(uri)
+	mkey, err := a.Resolve(uri)
 	if err != nil {
 		apiAppendFileFail.Inc(1)
 		return nil, "", err
@@ -426,7 +586,7 @@ func (a *API) AppendFile(...) {
 		path = path[1:]
 	}

-	mw, err := self.NewManifestWriter(mkey, nil)
+	mw, err := a.NewManifestWriter(mkey, nil)
 	if err != nil {
 		apiAppendFileFail.Inc(1)
 		return nil, "", err
@@ -463,21 +623,22 @@ func (a *API) AppendFile(...) {
 }

-func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) {
+// BuildDirectoryTree used by swarmfs_unix
+func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
 	uri, err := Parse("bzz:/" + mhash)
 	if err != nil {
 		return nil, nil, err
 	}
-	key, err = self.Resolve(uri)
+	addr, err = a.Resolve(uri)
 	if err != nil {
 		return nil, nil, err
 	}

 	quitC := make(chan bool)
-	rootTrie, err := loadManifest(self.dpa, key, quitC)
+	rootTrie, err := loadManifest(a.fileStore, addr, quitC)
 	if err != nil {
-		return nil, nil, fmt.Errorf("can't load manifest %v: %v", key.String(), err)
+		return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err)
 	}

 	manifestEntryMap = map[string]*manifestTrieEntry{}
@@ -486,7 +647,94 @@ func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
 	})
 	if err != nil {
-		return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err)
+		return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err)
 	}
-	return key, manifestEntryMap, nil
+	return addr, manifestEntryMap, nil
 }

+// ResourceLookup looks up mutable resource updates at specific periods and versions
+func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) {
+	var err error
+	rsrc, err := a.resource.Load(addr)
+	if err != nil {
+		return "", nil, err
+	}
+	if version != 0 {
+		if period == 0 {
+			return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0")
+		}
+		_, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup)
+	} else if period != 0 {
+		_, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup)
+	} else {
+		_, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup)
+	}
+	if err != nil {
+		return "", nil, err
+	}
+	var data []byte
+	_, data, err = a.resource.GetContent(rsrc.NameHash().Hex())
+	if err != nil {
+		return "", nil, err
+	}
+	return rsrc.Name(), data, nil
+}
+
+// ResourceCreate creates a Resource and returns its key
+func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) {
+	key, _, err := a.resource.New(ctx, name, frequency)
+	if err != nil {
+		return nil, err
+	}
+	return key, nil
+}
+
+// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval.
+// It will fail if the data is not a valid multihash.
+func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
+	return a.resourceUpdate(ctx, name, data, true)
+}
+
+// ResourceUpdate updates a Mutable Resource with arbitrary data.
+// Upon retrieval the update will be retrieved verbatim as bytes.
+func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) {
+	return a.resourceUpdate(ctx, name, data, false)
+}
+
+func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) {
+	var addr storage.Address
+	var err error
+	if multihash {
+		addr, err = a.resource.UpdateMultihash(ctx, name, data)
+	} else {
+		addr, err = a.resource.Update(ctx, name, data)
+	}
+	period, _ := a.resource.GetLastPeriod(name)
+	version, _ := a.resource.GetVersion(name)
+	return addr, period, version, err
+}
+
+// ResourceHashSize returns the size of the digest produced by the Mutable Resource hashing function
+func (a *API) ResourceHashSize() int {
+	return a.resource.HashSize
+}
+
+// ResourceIsValidated checks if the Mutable Resource has an active content validator.
+func (a *API) ResourceIsValidated() bool {
+	return a.resource.IsValidated()
+}
+
+// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk.
+func (a *API) ResolveResourceManifest(addr storage.Address) (storage.Address, error) {
+	trie, err := loadManifest(a.fileStore, addr, nil)
+	if err != nil {
+		return nil, fmt.Errorf("cannot load resource manifest: %v", err)
+	}
+
+	entry, _ := trie.getEntry("")
+	if entry.ContentType != ResourceContentType {
+		return nil, fmt.Errorf("not a resource manifest: %s", addr)
+	}
+
+	return storage.Address(common.FromHex(entry.Hash)), nil
+}
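ResourceLookup dispatches on which of period and version are non-zero: a version without a period is invalid, a period alone selects a historical lookup, and neither means latest. The decision table as a runnable sketch (lookupKind is an illustrative name, not part of the API):

package main

import (
	"errors"
	"fmt"
)

// lookupKind mirrors the dispatch in ResourceLookup: version != 0 requires
// period != 0; period alone selects a historical lookup; neither selects latest.
func lookupKind(period, version uint32) (string, error) {
	switch {
	case version != 0 && period == 0:
		return "", errors.New("period can't be 0 when a version is given")
	case version != 0:
		return "specific version", nil
	case period != 0:
		return "historical", nil
	default:
		return "latest", nil
	}
}

func main() {
	for _, pv := range [][2]uint32{{0, 0}, {3, 0}, {3, 2}, {0, 2}} {
		kind, err := lookupKind(pv[0], pv[1])
		fmt.Printf("period=%d version=%d -> %q err=%v\n", pv[0], pv[1], kind, err)
	}
}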

swarm/api/api_test.go

@@ -17,33 +17,34 @@
 package api

 import (
+	"context"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math/big"
 	"os"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/swarm/log"
 	"github.com/ethereum/go-ethereum/swarm/storage"
 )

-func testApi(t *testing.T, f func(*Api)) {
+func testAPI(t *testing.T, f func(*API, bool)) {
 	datadir, err := ioutil.TempDir("", "bzz-test")
 	if err != nil {
 		t.Fatalf("unable to create temp dir: %v", err)
 	}
-	os.RemoveAll(datadir)
 	defer os.RemoveAll(datadir)
-	dpa, err := storage.NewLocalDPA(datadir)
+	fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
 	if err != nil {
 		return
 	}
-	api := NewApi(dpa, nil)
-	dpa.Start()
-	f(api)
-	dpa.Stop()
+	api := NewAPI(fileStore, nil, nil)
+	f(api, false)
+	f(api, true)
 }

 type testResponse struct {
@@ -82,10 +83,9 @@ func expResponse(content string, mimeType string, status int) *Response {
 	return &Response{mimeType, status, int64(len(content)), content}
 }

-// func testGet(t *testing.T, api *Api, bzzhash string) *testResponse {
-func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse {
-	key := storage.Key(common.Hex2Bytes(bzzhash))
-	reader, mimeType, status, err := api.Get(key, path)
+func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
+	addr := storage.Address(common.Hex2Bytes(bzzhash))
+	reader, mimeType, status, _, err := api.Get(addr, path)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -106,27 +106,28 @@ func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
 }

 func TestApiPut(t *testing.T) {
-	testApi(t, func(api *Api) {
+	testAPI(t, func(api *API, toEncrypt bool) {
 		content := "hello"
 		exp := expResponse(content, "text/plain", 0)
 		// exp := expResponse([]byte(content), "text/plain", 0)
-		key, err := api.Put(content, exp.MimeType)
+		addr, wait, err := api.Put(content, exp.MimeType, toEncrypt)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
-		resp := testGet(t, api, key.String(), "")
+		wait()
+		resp := testGet(t, api, addr.Hex(), "")
 		checkResponse(t, resp, exp)
 	})
 }

 // testResolver implements the Resolver interface and either returns the given
 // hash if it is set, or returns a "name not found" error
-type testResolver struct {
+type testResolveValidator struct {
 	hash *common.Hash
 }

-func newTestResolver(addr string) *testResolver {
-	r := &testResolver{}
+func newTestResolveValidator(addr string) *testResolveValidator {
+	r := &testResolveValidator{}
 	if addr != "" {
 		hash := common.HexToHash(addr)
 		r.hash = &hash
@@ -134,21 +135,28 @@ func newTestResolveValidator(addr string) *testResolveValidator {
 	return r
 }

-func (t *testResolver) Resolve(addr string) (common.Hash, error) {
+func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
 	if t.hash == nil {
 		return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr)
 	}
 	return *t.hash, nil
 }

+func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
+	return
+}
+
+func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
+	return
+}
+
 // TestAPIResolve tests resolving URIs which can either contain content hashes
 // or ENS names
 func TestAPIResolve(t *testing.T) {
 	ensAddr := "swarm.eth"
 	hashAddr := "1111111111111111111111111111111111111111111111111111111111111111"
 	resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222"
-	doesResolve := newTestResolver(resolvedAddr)
-	doesntResolve := newTestResolver("")
+	doesResolve := newTestResolveValidator(resolvedAddr)
+	doesntResolve := newTestResolveValidator("")

 	type test struct {
 		desc      string
@@ -213,7 +221,7 @@ func TestAPIResolve(t *testing.T) {
 	}
 	for _, x := range tests {
 		t.Run(x.desc, func(t *testing.T) {
-			api := &Api{dns: x.dns}
+			api := &API{dns: x.dns}
 			uri := &URI{Addr: x.addr, Scheme: "bzz"}
 			if x.immutable {
 				uri.Scheme = "bzz-immutable"
@@ -239,15 +247,15 @@ func TestAPIResolve(t *testing.T) {
 }

 func TestMultiResolver(t *testing.T) {
-	doesntResolve := newTestResolver("")
+	doesntResolve := newTestResolveValidator("")

 	ethAddr := "swarm.eth"
 	ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222"
-	ethResolve := newTestResolver(ethHash)
+	ethResolve := newTestResolveValidator(ethHash)

 	testAddr := "swarm.test"
 	testHash := "0x1111111111111111111111111111111111111111111111111111111111111111"
-	testResolve := newTestResolver(testHash)
+	testResolve := newTestResolveValidator(testHash)

 	tests := []struct {
 		desc string

@ -30,6 +30,7 @@ import (
"net/textproto" "net/textproto"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"strconv" "strconv"
"strings" "strings"
@ -52,12 +53,17 @@ type Client struct {
Gateway string Gateway string
} }
// UploadRaw uploads raw data to swarm and returns the resulting hash // UploadRaw uploads raw data to swarm and returns the resulting hash. If toEncrypt is true it
func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { // uploads encrypted data
func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) {
if size <= 0 { if size <= 0 {
return "", errors.New("data size must be greater than zero") return "", errors.New("data size must be greater than zero")
} }
req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/", r) addr := ""
if toEncrypt {
addr = "encrypt"
}
req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -77,18 +83,20 @@ func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) {
return string(data), nil return string(data), nil
} }
// DownloadRaw downloads raw data from swarm // DownloadRaw downloads raw data from swarm and it returns a ReadCloser and a bool whether the
func (c *Client) DownloadRaw(hash string) (io.ReadCloser, error) { // content was encrypted
func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
uri := c.Gateway + "/bzz-raw:/" + hash uri := c.Gateway + "/bzz-raw:/" + hash
res, err := http.DefaultClient.Get(uri) res, err := http.DefaultClient.Get(uri)
if err != nil { if err != nil {
return nil, err return nil, false, err
} }
if res.StatusCode != http.StatusOK { if res.StatusCode != http.StatusOK {
res.Body.Close() res.Body.Close()
return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status)
} }
return res.Body, nil isEncrypted := (res.Header.Get("X-Decrypted") == "true")
return res.Body, isEncrypted, nil
} }
// File represents a file in a swarm manifest and is used for uploading and // File represents a file in a swarm manifest and is used for uploading and
@ -125,11 +133,11 @@ func Open(path string) (*File, error) {
// (if the manifest argument is non-empty) or creates a new manifest containing // (if the manifest argument is non-empty) or creates a new manifest containing
// the file, returning the resulting manifest hash (the file will then be // the file, returning the resulting manifest hash (the file will then be
// available at bzz:/<hash>/<path>) // available at bzz:/<hash>/<path>)
func (c *Client) Upload(file *File, manifest string) (string, error) { func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) {
if file.Size <= 0 { if file.Size <= 0 {
return "", errors.New("file size must be greater than zero") return "", errors.New("file size must be greater than zero")
} }
return c.TarUpload(manifest, &FileUploader{file}) return c.TarUpload(manifest, &FileUploader{file}, toEncrypt)
} }
// Download downloads a file with the given path from the swarm manifest with // Download downloads a file with the given path from the swarm manifest with
@ -159,14 +167,14 @@ func (c *Client) Download(hash, path string) (*File, error) {
// directory will then be available at bzz:/<hash>/path/to/file), with // directory will then be available at bzz:/<hash>/path/to/file), with
// the file specified in defaultPath being uploaded to the root of the manifest // the file specified in defaultPath being uploaded to the root of the manifest
// (i.e. bzz:/<hash>/) // (i.e. bzz:/<hash>/)
func (c *Client) UploadDirectory(dir, defaultPath, manifest string) (string, error) { func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) {
stat, err := os.Stat(dir) stat, err := os.Stat(dir)
if err != nil { if err != nil {
return "", err return "", err
} else if !stat.IsDir() { } else if !stat.IsDir() {
return "", fmt.Errorf("not a directory: %s", dir) return "", fmt.Errorf("not a directory: %s", dir)
} }
return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}) return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}, toEncrypt)
} }
// DownloadDirectory downloads the files contained in a swarm manifest under // DownloadDirectory downloads the files contained in a swarm manifest under
@ -228,27 +236,109 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error {
} }
} }
// DownloadFile downloads a single file into the destination directory
// if the manifest entry does not specify a file name - it will fallback
// to the hash of the file as a filename
func (c *Client) DownloadFile(hash, path, dest string) error {
hasDestinationFilename := false
if stat, err := os.Stat(dest); err == nil {
hasDestinationFilename = !stat.IsDir()
} else {
if os.IsNotExist(err) {
// does not exist - should be created
hasDestinationFilename = true
} else {
return fmt.Errorf("could not stat path: %v", err)
}
}
manifestList, err := c.List(hash, path)
if err != nil {
return fmt.Errorf("could not list manifest: %v", err)
}
switch len(manifestList.Entries) {
case 0:
return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct")
case 1:
//continue
default:
return fmt.Errorf("got too many matches for this path")
}
uri := c.Gateway + "/bzz:/" + hash + "/" + path
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode)
}
filename := ""
if hasDestinationFilename {
filename = dest
} else {
// try to assert
re := regexp.MustCompile("[^/]+$") //everything after last slash
if results := re.FindAllString(path, -1); len(results) > 0 {
filename = results[len(results)-1]
} else {
if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" {
filename = entry.Path
} else {
// assume hash as name if there's nothing from the command line
filename = hash
}
}
filename = filepath.Join(dest, filename)
}
filePath, err := filepath.Abs(filename)
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil {
return err
}
dst, err := os.Create(filename)
if err != nil {
return err
}
defer dst.Close()
_, err = io.Copy(dst, res.Body)
return err
}
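For orientation, here is a minimal, hypothetical usage sketch of the new DownloadFile helper (not part of the diff; the gateway URL, hash, and paths are placeholders):

```go
package main

import (
	"log"

	swarm "github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	client := swarm.NewClient("http://localhost:8500") // placeholder gateway
	// hypothetical manifest hash
	hash := "d1f25a870a7bb7e5d526a7623338e4e9b8399e76df8b634020d11d969594f24a"
	// "./out" is an existing directory here, so the filename is inferred
	// from the requested path ("index.html"); pointing dest at a
	// non-existent path would make it the explicit target filename.
	if err := client.DownloadFile(hash, "index.html", "./out"); err != nil {
		log.Fatal(err)
	}
}
```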
// UploadManifest uploads the given manifest to swarm // UploadManifest uploads the given manifest to swarm
func (c *Client) UploadManifest(m *api.Manifest) (string, error) { func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) {
data, err := json.Marshal(m) data, err := json.Marshal(m)
if err != nil { if err != nil {
return "", err return "", err
} }
return c.UploadRaw(bytes.NewReader(data), int64(len(data))) return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
} }
// DownloadManifest downloads a swarm manifest // DownloadManifest downloads a swarm manifest
func (c *Client) DownloadManifest(hash string) (*api.Manifest, error) { func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) {
res, err := c.DownloadRaw(hash) res, isEncrypted, err := c.DownloadRaw(hash)
if err != nil { if err != nil {
return nil, err return nil, isEncrypted, err
} }
defer res.Close() defer res.Close()
var manifest api.Manifest var manifest api.Manifest
if err := json.NewDecoder(res).Decode(&manifest); err != nil { if err := json.NewDecoder(res).Decode(&manifest); err != nil {
return nil, err return nil, isEncrypted, err
} }
return &manifest, nil return &manifest, isEncrypted, nil
} }
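Callers of DownloadManifest now receive the encryption status as a second return value; a hedged sketch of the adjustment (client and hash as in the previous sketch, log import assumed):

```go
// printManifest is a hypothetical helper built on the new signature.
func printManifest(client *swarm.Client, hash string) error {
	m, isEncrypted, err := client.DownloadManifest(hash)
	if err != nil {
		return err
	}
	log.Printf("manifest has %d entries (encrypted: %v)", len(m.Entries), isEncrypted)
	return nil
}
```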
// List lists files in a swarm manifest which have the given prefix, grouping // List lists files in a swarm manifest which have the given prefix, grouping
@ -350,10 +440,19 @@ type UploadFn func(file *File) error
// TarUpload uses the given Uploader to upload files to swarm as a tar stream, // TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash // returning the resulting manifest hash
func (c *Client) TarUpload(hash string, uploader Uploader) (string, error) { func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (string, error) {
reqR, reqW := io.Pipe() reqR, reqW := io.Pipe()
defer reqR.Close() defer reqR.Close()
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) addr := hash
// If there is a hash already (a manifest), then that manifest will determine if the upload has
// to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if
// there is encryption or not.
if hash == "" && toEncrypt {
// This is the built-in address for the encrypted upload endpoint
addr = "encrypt"
}
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR)
if err != nil { if err != nil {
return "", err return "", err
} }
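The address selection above encodes a precedence rule: an existing manifest fixes the encryption mode, while a fresh upload (empty hash) may opt in via toEncrypt, which routes the POST to the reserved bzz:/encrypt address. A hypothetical wrapper making that rule explicit (swarm/api/client imported as in the earlier sketch):

```go
// uploadTar illustrates the routing: hash == "" && toEncrypt posts to
// <gateway>/bzz:/encrypt; any non-empty hash posts to <gateway>/bzz:/<hash>
// and the manifest's own encryption setting applies.
func uploadTar(client *swarm.Client, hash string, up swarm.Uploader, toEncrypt bool) (string, error) {
	return client.TarUpload(hash, up, toEncrypt)
}
```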

@ -26,28 +26,43 @@ import (
"testing" "testing"
"github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/api"
swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
"github.com/ethereum/go-ethereum/swarm/testutil" "github.com/ethereum/go-ethereum/swarm/testutil"
) )
func serverFunc(api *api.API) testutil.TestServer {
return swarmhttp.NewServer(api)
}
// TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm // TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm
func TestClientUploadDownloadRaw(t *testing.T) { func TestClientUploadDownloadRaw(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) testClientUploadDownloadRaw(false, t)
}
func TestClientUploadDownloadRawEncrypted(t *testing.T) {
testClientUploadDownloadRaw(true, t)
}
func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
client := NewClient(srv.URL) client := NewClient(srv.URL)
// upload some raw data // upload some raw data
data := []byte("foo123") data := []byte("foo123")
hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data))) hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
// check we can download the same data // check we can download the same data
res, err := client.DownloadRaw(hash) res, isEncrypted, err := client.DownloadRaw(hash)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
if isEncrypted != toEncrypt {
t.Fatalf("Expected encyption status %v got %v", toEncrypt, isEncrypted)
}
defer res.Close() defer res.Close()
gotData, err := ioutil.ReadAll(res) gotData, err := ioutil.ReadAll(res)
if err != nil { if err != nil {
@ -61,7 +76,15 @@ func TestClientUploadDownloadRaw(t *testing.T) {
// TestClientUploadDownloadFiles tests uploading and downloading files to swarm // TestClientUploadDownloadFiles tests uploading and downloading files to swarm
// manifests // manifests
func TestClientUploadDownloadFiles(t *testing.T) { func TestClientUploadDownloadFiles(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) testClientUploadDownloadFiles(false, t)
}
func TestClientUploadDownloadFilesEncrypted(t *testing.T) {
testClientUploadDownloadFiles(true, t)
}
func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
client := NewClient(srv.URL) client := NewClient(srv.URL)
@ -74,7 +97,7 @@ func TestClientUploadDownloadFiles(t *testing.T) {
Size: int64(len(data)), Size: int64(len(data)),
}, },
} }
hash, err := client.Upload(file, manifest) hash, err := client.Upload(file, manifest, toEncrypt)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -159,7 +182,7 @@ func newTestDirectory(t *testing.T) string {
// TestClientUploadDownloadDirectory tests uploading and downloading a // TestClientUploadDownloadDirectory tests uploading and downloading a
// directory of files to a swarm manifest // directory of files to a swarm manifest
func TestClientUploadDownloadDirectory(t *testing.T) { func TestClientUploadDownloadDirectory(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
dir := newTestDirectory(t) dir := newTestDirectory(t)
@ -168,7 +191,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
// upload the directory // upload the directory
client := NewClient(srv.URL) client := NewClient(srv.URL)
defaultPath := filepath.Join(dir, testDirFiles[0]) defaultPath := filepath.Join(dir, testDirFiles[0])
hash, err := client.UploadDirectory(dir, defaultPath, "") hash, err := client.UploadDirectory(dir, defaultPath, "", false)
if err != nil { if err != nil {
t.Fatalf("error uploading directory: %s", err) t.Fatalf("error uploading directory: %s", err)
} }
@ -217,14 +240,22 @@ func TestClientUploadDownloadDirectory(t *testing.T) {
// TestClientFileList tests listing files in a swarm manifest // TestClientFileList tests listing files in a swarm manifest
func TestClientFileList(t *testing.T) { func TestClientFileList(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) testClientFileList(false, t)
}
func TestClientFileListEncrypted(t *testing.T) {
testClientFileList(true, t)
}
func testClientFileList(toEncrypt bool, t *testing.T) {
srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
dir := newTestDirectory(t) dir := newTestDirectory(t)
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
client := NewClient(srv.URL) client := NewClient(srv.URL)
hash, err := client.UploadDirectory(dir, "", "") hash, err := client.UploadDirectory(dir, "", "", toEncrypt)
if err != nil { if err != nil {
t.Fatalf("error uploading directory: %s", err) t.Fatalf("error uploading directory: %s", err)
} }
@ -275,7 +306,7 @@ func TestClientFileList(t *testing.T) {
// TestClientMultipartUpload tests uploading files to swarm using a multipart // TestClientMultipartUpload tests uploading files to swarm using a multipart
// upload // upload
func TestClientMultipartUpload(t *testing.T) { func TestClientMultipartUpload(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
// define an uploader which uploads testDirFiles with some data // define an uploader which uploads testDirFiles with some data

@ -21,13 +21,16 @@ import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/contracts/ens"
"github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network" "github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/pss"
"github.com/ethereum/go-ethereum/swarm/services/swap" "github.com/ethereum/go-ethereum/swarm/services/swap"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
@ -41,47 +44,55 @@ const (
// allow several bzz nodes running in parallel // allow several bzz nodes running in parallel
type Config struct { type Config struct {
// serialised/persisted fields // serialised/persisted fields
*storage.StoreParams *storage.FileStoreParams
*storage.ChunkerParams *storage.LocalStoreParams
*network.HiveParams *network.HiveParams
Swap *swap.SwapParams Swap *swap.LocalProfile
*network.SyncParams Pss *pss.PssParams
Contract common.Address //*network.SyncParams
EnsRoot common.Address Contract common.Address
EnsAPIs []string EnsRoot common.Address
Path string EnsAPIs []string
ListenAddr string Path string
Port string ListenAddr string
PublicKey string Port string
BzzKey string PublicKey string
NetworkId uint64 BzzKey string
SwapEnabled bool NodeID string
SyncEnabled bool NetworkID uint64
SwapApi string SwapEnabled bool
Cors string SyncEnabled bool
BzzAccount string DeliverySkipCheck bool
BootNodes string SyncUpdateDelay time.Duration
SwapAPI string
Cors string
BzzAccount string
BootNodes string
privateKey *ecdsa.PrivateKey
} }
//create a default config with all parameters set to defaults //create a default config with all parameters set to defaults
func NewDefaultConfig() (self *Config) { func NewConfig() (c *Config) {
self = &Config{ c = &Config{
StoreParams: storage.NewDefaultStoreParams(), LocalStoreParams: storage.NewDefaultLocalStoreParams(),
ChunkerParams: storage.NewChunkerParams(), FileStoreParams: storage.NewFileStoreParams(),
HiveParams: network.NewDefaultHiveParams(), HiveParams: network.NewHiveParams(),
SyncParams: network.NewDefaultSyncParams(), //SyncParams: network.NewDefaultSyncParams(),
Swap: swap.NewDefaultSwapParams(), Swap: swap.NewDefaultSwapParams(),
ListenAddr: DefaultHTTPListenAddr, Pss: pss.NewPssParams(),
Port: DefaultHTTPPort, ListenAddr: DefaultHTTPListenAddr,
Path: node.DefaultDataDir(), Port: DefaultHTTPPort,
EnsAPIs: nil, Path: node.DefaultDataDir(),
EnsRoot: ens.TestNetAddress, EnsAPIs: nil,
NetworkId: network.NetworkId, EnsRoot: ens.TestNetAddress,
SwapEnabled: false, NetworkID: network.DefaultNetworkID,
SyncEnabled: true, SwapEnabled: false,
SwapApi: "", SyncEnabled: true,
BootNodes: "", DeliverySkipCheck: false,
SyncUpdateDelay: 15 * time.Second,
SwapAPI: "",
BootNodes: "",
} }
return return
@ -89,11 +100,11 @@ func NewDefaultConfig() (self *Config) {
//some config params need to be initialized after the //some config params need to be initialized after the
//config building phase is complete (e.g. due to overriding flags) //config building phase is complete (e.g. due to overriding flags)
func (self *Config) Init(prvKey *ecdsa.PrivateKey) { func (c *Config) Init(prvKey *ecdsa.PrivateKey) {
address := crypto.PubkeyToAddress(prvKey.PublicKey) address := crypto.PubkeyToAddress(prvKey.PublicKey)
self.Path = filepath.Join(self.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) c.Path = filepath.Join(c.Path, "bzz-"+common.Bytes2Hex(address.Bytes()))
err := os.MkdirAll(self.Path, os.ModePerm) err := os.MkdirAll(c.Path, os.ModePerm)
if err != nil { if err != nil {
log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err)) log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err))
return return
@ -103,11 +114,25 @@ func (self *Config) Init(prvKey *ecdsa.PrivateKey) {
pubkeyhex := common.ToHex(pubkey) pubkeyhex := common.ToHex(pubkey)
keyhex := crypto.Keccak256Hash(pubkey).Hex() keyhex := crypto.Keccak256Hash(pubkey).Hex()
self.PublicKey = pubkeyhex c.PublicKey = pubkeyhex
self.BzzKey = keyhex c.BzzKey = keyhex
c.NodeID = discover.PubkeyID(&prvKey.PublicKey).String()
if c.SwapEnabled {
c.Swap.Init(c.Contract, prvKey)
}
c.privateKey = prvKey
c.LocalStoreParams.Init(c.Path)
c.LocalStoreParams.BaseKey = common.FromHex(keyhex)
self.Swap.Init(self.Contract, prvKey) c.Pss = c.Pss.WithPrivateKey(c.privateKey)
self.SyncParams.Init(self.Path) }
self.HiveParams.Init(self.Path)
self.StoreParams.Init(self.Path) func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) {
if c.privateKey != nil {
privKey = c.privateKey
c.privateKey = nil
}
return privKey
} }
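A minimal sketch of the resulting configuration lifecycle, NewConfig, then Init with the node key, then a one-shot ShiftPrivateKey; the data directory is a placeholder and errors are elided:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	prvKey, _ := crypto.GenerateKey()

	cfg := api.NewConfig()
	cfg.Path = "/tmp/bzz-example" // hypothetical data dir
	cfg.Init(prvKey)              // derives PublicKey, BzzKey, NodeID and store paths

	// Init keeps the key on the config; ShiftPrivateKey hands it out
	// exactly once and clears the field.
	pk := cfg.ShiftPrivateKey()
	fmt.Println("got key:", pk != nil, "second call nil:", cfg.ShiftPrivateKey() == nil)
}
```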

@ -33,9 +33,10 @@ func TestConfig(t *testing.T) {
t.Fatalf("failed to load private key: %v", err) t.Fatalf("failed to load private key: %v", err)
} }
one := NewDefaultConfig() one := NewConfig()
two := NewDefaultConfig() two := NewConfig()
one.LocalStoreParams = two.LocalStoreParams
if equal := reflect.DeepEqual(one, two); !equal { if equal := reflect.DeepEqual(one, two); !equal {
t.Fatal("Two default configs are not equal") t.Fatal("Two default configs are not equal")
} }
@ -49,21 +50,10 @@ func TestConfig(t *testing.T) {
if one.PublicKey == "" { if one.PublicKey == "" {
t.Fatal("Expected PublicKey to be set") t.Fatal("Expected PublicKey to be set")
} }
if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled {
//the Init function should append subdirs to the given path
if one.Swap.PayProfile.Beneficiary == (common.Address{}) {
t.Fatal("Failed to correctly initialize SwapParams") t.Fatal("Failed to correctly initialize SwapParams")
} }
if one.ChunkDbPath == one.Path {
if one.SyncParams.RequestDbPath == one.Path {
t.Fatal("Failed to correctly initialize SyncParams")
}
if one.HiveParams.KadDbPath == one.Path {
t.Fatal("Failed to correctly initialize HiveParams")
}
if one.StoreParams.ChunkDbPath == one.Path {
t.Fatal("Failed to correctly initialize StoreParams") t.Fatal("Failed to correctly initialize StoreParams")
} }
} }

@ -27,26 +27,27 @@ import (
"sync" "sync"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
const maxParallelFiles = 5 const maxParallelFiles = 5
type FileSystem struct { type FileSystem struct {
api *Api api *API
} }
func NewFileSystem(api *Api) *FileSystem { func NewFileSystem(api *API) *FileSystem {
return &FileSystem{api} return &FileSystem{api}
} }
// Upload replicates a local directory as a manifest file and uploads it // Upload replicates a local directory as a manifest file and uploads it
// using dpa store // using FileStore
// This function waits for the chunks to be stored.
// TODO: localpath should point to a manifest // TODO: localpath should point to a manifest
// //
// DEPRECATED: Use the HTTP API instead // DEPRECATED: Use the HTTP API instead
func (self *FileSystem) Upload(lpath, index string) (string, error) { func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) {
var list []*manifestTrieEntry var list []*manifestTrieEntry
localpath, err := filepath.Abs(filepath.Clean(lpath)) localpath, err := filepath.Abs(filepath.Clean(lpath))
if err != nil { if err != nil {
@ -111,13 +112,13 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
f, err := os.Open(entry.Path) f, err := os.Open(entry.Path)
if err == nil { if err == nil {
stat, _ := f.Stat() stat, _ := f.Stat()
var hash storage.Key var hash storage.Address
wg := &sync.WaitGroup{} var wait func()
hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil) hash, wait, err = fs.api.fileStore.Store(f, stat.Size(), toEncrypt)
if hash != nil { if hash != nil {
list[i].Hash = hash.String() list[i].Hash = hash.Hex()
} }
wg.Wait() wait()
awg.Done() awg.Done()
if err == nil { if err == nil {
first512 := make([]byte, 512) first512 := make([]byte, 512)
@ -142,7 +143,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
} }
trie := &manifestTrie{ trie := &manifestTrie{
dpa: self.api.dpa, fileStore: fs.api.fileStore,
} }
quitC := make(chan bool) quitC := make(chan bool)
for i, entry := range list { for i, entry := range list {
@ -163,7 +164,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
err2 := trie.recalcAndStore() err2 := trie.recalcAndStore()
var hs string var hs string
if err2 == nil { if err2 == nil {
hs = trie.hash.String() hs = trie.ref.Hex()
} }
awg.Wait() awg.Wait()
return hs, err2 return hs, err2
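The pattern above, Store returning an address plus a wait function, replaces the old DPA-with-WaitGroup idiom throughout this diff; a hedged generic sketch (io and swarm/storage imports assumed):

```go
// storeBlocking is a hypothetical helper: Store returns the content
// address and a wait function that blocks until the chunks are persisted,
// taking over the role of the old wg.Wait().
func storeBlocking(fileStore *storage.FileStore, r io.Reader, size int64, toEncrypt bool) (storage.Address, error) {
	addr, wait, err := fileStore.Store(r, size, toEncrypt)
	if err != nil {
		return nil, err
	}
	wait()
	return addr, nil
}
```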
@ -173,7 +174,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) {
// under localpath // under localpath
// //
// DEPRECATED: Use the HTTP API instead // DEPRECATED: Use the HTTP API instead
func (self *FileSystem) Download(bzzpath, localpath string) error { func (fs *FileSystem) Download(bzzpath, localpath string) error {
lpath, err := filepath.Abs(filepath.Clean(localpath)) lpath, err := filepath.Abs(filepath.Clean(localpath))
if err != nil { if err != nil {
return err return err
@ -188,7 +189,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
if err != nil { if err != nil {
return err return err
} }
key, err := self.api.Resolve(uri) addr, err := fs.api.Resolve(uri)
if err != nil { if err != nil {
return err return err
} }
@ -199,14 +200,14 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
} }
quitC := make(chan bool) quitC := make(chan bool)
trie, err := loadManifest(self.api.dpa, key, quitC) trie, err := loadManifest(fs.api.fileStore, addr, quitC)
if err != nil { if err != nil {
log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err))
return err return err
} }
type downloadListEntry struct { type downloadListEntry struct {
key storage.Key addr storage.Address
path string path string
} }
@ -217,7 +218,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) { err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
log.Trace(fmt.Sprintf("fs.Download: %#v", entry)) log.Trace(fmt.Sprintf("fs.Download: %#v", entry))
key = common.Hex2Bytes(entry.Hash) addr = common.Hex2Bytes(entry.Hash)
path := lpath + "/" + suffix path := lpath + "/" + suffix
dir := filepath.Dir(path) dir := filepath.Dir(path)
if dir != prevPath { if dir != prevPath {
@ -225,7 +226,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
prevPath = dir prevPath = dir
} }
if (mde == nil) && (path != dir+"/") { if (mde == nil) && (path != dir+"/") {
list = append(list, &downloadListEntry{key: key, path: path}) list = append(list, &downloadListEntry{addr: addr, path: path})
} }
}) })
if err != nil { if err != nil {
@ -244,7 +245,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
} }
go func(i int, entry *downloadListEntry) { go func(i int, entry *downloadListEntry) {
defer wg.Done() defer wg.Done()
err := retrieveToFile(quitC, self.api.dpa, entry.key, entry.path) err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path)
if err != nil { if err != nil {
select { select {
case errC <- err: case errC <- err:
@ -267,12 +268,12 @@ func (self *FileSystem) Download(bzzpath, localpath string) error {
} }
} }
func retrieveToFile(quitC chan bool, dpa *storage.DPA, key storage.Key, path string) error { func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error {
f, err := os.Create(path) // TODO: basePath separators f, err := os.Create(path) // TODO: basePath separators
if err != nil { if err != nil {
return err return err
} }
reader := dpa.Retrieve(key) reader, _ := fileStore.Retrieve(addr)
writer := bufio.NewWriter(f) writer := bufio.NewWriter(f)
size, err := reader.Size(quitC) size, err := reader.Size(quitC)
if err != nil { if err != nil {

@ -21,7 +21,6 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"sync"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -30,9 +29,9 @@ import (
var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test") var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")
func testFileSystem(t *testing.T, f func(*FileSystem)) { func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
testApi(t, func(api *Api) { testAPI(t, func(api *API, toEncrypt bool) {
f(NewFileSystem(api)) f(NewFileSystem(api), toEncrypt)
}) })
} }
@ -47,9 +46,9 @@ func readPath(t *testing.T, parts ...string) string {
} }
func TestApiDirUpload0(t *testing.T) { func TestApiDirUpload0(t *testing.T) {
testFileSystem(t, func(fs *FileSystem) { testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api api := fs.api
bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -63,8 +62,8 @@ func TestApiDirUpload0(t *testing.T) {
exp = expResponse(content, "text/css", 0) exp = expResponse(content, "text/css", 0)
checkResponse(t, resp, exp) checkResponse(t, resp, exp)
key := storage.Key(common.Hex2Bytes(bzzhash)) addr := storage.Address(common.Hex2Bytes(bzzhash))
_, _, _, err = api.Get(key, "") _, _, _, _, err = api.Get(addr, "")
if err == nil { if err == nil {
t.Fatalf("expected error: %v", err) t.Fatalf("expected error: %v", err)
} }
@ -75,27 +74,28 @@ func TestApiDirUpload0(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
newbzzhash, err := fs.Upload(downloadDir, "") newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
if bzzhash != newbzzhash { // TODO: currently the hash is not deterministic in the encrypted case
if !toEncrypt && bzzhash != newbzzhash {
t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash) t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash)
} }
}) })
} }
func TestApiDirUploadModify(t *testing.T) { func TestApiDirUploadModify(t *testing.T) {
testFileSystem(t, func(fs *FileSystem) { testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api api := fs.api
bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
} }
key := storage.Key(common.Hex2Bytes(bzzhash)) addr := storage.Address(common.Hex2Bytes(bzzhash))
key, err = api.Modify(key, "index.html", "", "") addr, err = api.Modify(addr, "index.html", "", "")
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
@ -105,24 +105,23 @@ func TestApiDirUploadModify(t *testing.T) {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
} }
wg := &sync.WaitGroup{} hash, wait, err := api.Store(bytes.NewReader(index), int64(len(index)), toEncrypt)
hash, err := api.Store(bytes.NewReader(index), int64(len(index)), wg) wait()
wg.Wait()
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
} }
key, err = api.Modify(key, "index2.html", hash.Hex(), "text/html; charset=utf-8") addr, err = api.Modify(addr, "index2.html", hash.Hex(), "text/html; charset=utf-8")
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
} }
key, err = api.Modify(key, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") addr, err = api.Modify(addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8")
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
} }
bzzhash = key.String() bzzhash = addr.Hex()
content := readPath(t, "testdata", "test0", "index.html") content := readPath(t, "testdata", "test0", "index.html")
resp := testGet(t, api, bzzhash, "index2.html") resp := testGet(t, api, bzzhash, "index2.html")
@ -138,7 +137,7 @@ func TestApiDirUploadModify(t *testing.T) {
exp = expResponse(content, "text/css", 0) exp = expResponse(content, "text/css", 0)
checkResponse(t, resp, exp) checkResponse(t, resp, exp)
_, _, _, err = api.Get(key, "") _, _, _, _, err = api.Get(addr, "")
if err == nil { if err == nil {
t.Errorf("expected error: %v", err) t.Errorf("expected error: %v", err)
} }
@ -146,9 +145,9 @@ func TestApiDirUploadModify(t *testing.T) {
} }
func TestApiDirUploadWithRootFile(t *testing.T) { func TestApiDirUploadWithRootFile(t *testing.T) {
testFileSystem(t, func(fs *FileSystem) { testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api api := fs.api
bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html") bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
@ -162,9 +161,9 @@ func TestApiDirUploadWithRootFile(t *testing.T) {
} }
func TestApiFileUpload(t *testing.T) { func TestApiFileUpload(t *testing.T) {
testFileSystem(t, func(fs *FileSystem) { testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api api := fs.api
bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "") bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return
@ -178,9 +177,9 @@ func TestApiFileUpload(t *testing.T) {
} }
func TestApiFileUploadWithRootFile(t *testing.T) { func TestApiFileUploadWithRootFile(t *testing.T) {
testFileSystem(t, func(fs *FileSystem) { testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
api := fs.api api := fs.api
bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html") bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt)
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
return return

@ -31,6 +31,7 @@ import (
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/api"
l "github.com/ethereum/go-ethereum/swarm/log"
) )
//templateMap holds a mapping of an HTTP error code to a template //templateMap holds a mapping of an HTTP error code to a template
@ -44,7 +45,7 @@ var (
) )
//parameters needed for formatting the correct HTML page //parameters needed for formatting the correct HTML page
type ErrorParams struct { type ResponseParams struct {
Msg string Msg string
Code int Code int
Timestamp string Timestamp string
@ -113,45 +114,49 @@ func ValidateCaseErrors(r *Request) string {
//For example, if the user requests bzz:/<hash>/read and that manifest contains entries //For example, if the user requests bzz:/<hash>/read and that manifest contains entries
//"readme.md" and "readinglist.txt", a HTML page is returned with this two links. //"readme.md" and "readinglist.txt", a HTML page is returned with this two links.
//This only applies if the manifest has no default entry //This only applies if the manifest has no default entry
func ShowMultipleChoices(w http.ResponseWriter, r *Request, list api.ManifestList) { func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) {
msg := "" msg := ""
if list.Entries == nil { if list.Entries == nil {
ShowError(w, r, "Could not resolve", http.StatusInternalServerError) Respond(w, req, "Could not resolve", http.StatusInternalServerError)
return return
} }
//make links relative //make links relative
//requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt" //requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt"
//to get clickable links, need to remove the ambiguous path, i.e. "read" //to get clickable links, need to remove the ambiguous path, i.e. "read"
idx := strings.LastIndex(r.RequestURI, "/") idx := strings.LastIndex(req.RequestURI, "/")
if idx == -1 { if idx == -1 {
ShowError(w, r, "Internal Server Error", http.StatusInternalServerError) Respond(w, req, "Internal Server Error", http.StatusInternalServerError)
return return
} }
//remove ambiguous part //remove ambiguous part
base := r.RequestURI[:idx+1] base := req.RequestURI[:idx+1]
for _, e := range list.Entries { for _, e := range list.Entries {
//create clickable link for each entry //create clickable link for each entry
msg += "<a href='" + base + e.Path + "'>" + e.Path + "</a><br/>" msg += "<a href='" + base + e.Path + "'>" + e.Path + "</a><br/>"
} }
respond(w, &r.Request, &ErrorParams{ Respond(w, req, msg, http.StatusMultipleChoices)
Code: http.StatusMultipleChoices,
Details: template.HTML(msg),
Timestamp: time.Now().Format(time.RFC1123),
template: getTemplate(http.StatusMultipleChoices),
})
} }
//ShowError is used to show an HTML error page to a client. //Respond is used to show an HTML page to a client.
//If there is an `Accept` header of `application/json`, JSON will be returned instead //If there is an `Accept` header of `application/json`, JSON will be returned instead
//The function just takes a string message which will be displayed in the error page. //The function just takes a string message which will be displayed in the error page.
//The code is used to evaluate which template will be displayed //The code is used to evaluate which template will be displayed
//(and return the correct HTTP status code) //(and return the correct HTTP status code)
func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { func Respond(w http.ResponseWriter, req *Request, msg string, code int) {
additionalMessage := ValidateCaseErrors(r) additionalMessage := ValidateCaseErrors(req)
if code == http.StatusInternalServerError { switch code {
log.Error(msg) case http.StatusInternalServerError:
log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code)
default:
log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code)
}
if code >= 400 {
w.Header().Del("Cache-Control") //avoid sending cache headers for errors!
w.Header().Del("ETag")
} }
respond(w, &r.Request, &ErrorParams{
respond(w, &req.Request, &ResponseParams{
Code: code, Code: code,
Msg: msg, Msg: msg,
Details: template.HTML(additionalMessage), Details: template.HTML(additionalMessage),
@ -161,17 +166,17 @@ func ShowError(w http.ResponseWriter, r *Request, msg string, code int) {
} }
//evaluate if client accepts html or json response //evaluate if client accepts html or json response
func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) { func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
w.WriteHeader(params.Code) w.WriteHeader(params.Code)
if r.Header.Get("Accept") == "application/json" { if r.Header.Get("Accept") == "application/json" {
respondJson(w, params) respondJSON(w, params)
} else { } else {
respondHtml(w, params) respondHTML(w, params)
} }
} }
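Seen from the client side, the negotiation means a request with Accept: application/json receives the JSON encoding of ResponseParams, while anything else gets the HTML template. A hypothetical probe (net/http and log imports assumed; gateway URL is a placeholder):

```go
// probeJSONError requests a bogus path and asks for the JSON error body.
func probeJSONError() {
	req, err := http.NewRequest("GET", "http://localhost:8500/bzz:/nonexistent/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Accept", "application/json") // selects respondJSON
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	// body carries the ResponseParams fields (Msg, Code, Timestamp, Details)
}
```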
//return an HTML page //return an HTML page
func respondHtml(w http.ResponseWriter, params *ErrorParams) { func respondHTML(w http.ResponseWriter, params *ResponseParams) {
htmlCounter.Inc(1) htmlCounter.Inc(1)
err := params.template.Execute(w, params) err := params.template.Execute(w, params)
if err != nil { if err != nil {
@ -180,7 +185,7 @@ func respondHtml(w http.ResponseWriter, params *ErrorParams) {
} }
//return JSON //return JSON
func respondJson(w http.ResponseWriter, params *ErrorParams) { func respondJSON(w http.ResponseWriter, params *ResponseParams) {
jsonCounter.Inc(1) jsonCounter.Inc(1)
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(params) json.NewEncoder(w).Encode(params)
@ -190,7 +195,6 @@ func respondJson(w http.ResponseWriter, params *ErrorParams) {
func getTemplate(code int) *template.Template { func getTemplate(code int) *template.Template {
if val, tmpl := templateMap[code]; tmpl { if val, tmpl := templateMap[code]; tmpl {
return val return val
} else {
return templateMap[0]
} }
return templateMap[0]
} }

File diff suppressed because one or more lines are too long

@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License // You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package http_test package http
import ( import (
"encoding/json" "encoding/json"
@ -30,7 +30,7 @@ import (
func TestError(t *testing.T) { func TestError(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
var resp *http.Response var resp *http.Response
@ -56,7 +56,7 @@ func TestError(t *testing.T) {
} }
func Test404Page(t *testing.T) { func Test404Page(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
var resp *http.Response var resp *http.Response
@ -82,7 +82,7 @@ func Test404Page(t *testing.T) {
} }
func Test500Page(t *testing.T) { func Test500Page(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
var resp *http.Response var resp *http.Response
@ -107,7 +107,7 @@ func Test500Page(t *testing.T) {
} }
} }
func Test500PageWith0xHashPrefix(t *testing.T) { func Test500PageWith0xHashPrefix(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
var resp *http.Response var resp *http.Response
@ -137,7 +137,7 @@ func Test500PageWith0xHashPrefix(t *testing.T) {
} }
func TestJsonResponse(t *testing.T) { func TestJsonResponse(t *testing.T) {
srv := testutil.NewTestSwarmServer(t) srv := testutil.NewTestSwarmServer(t, serverFunc)
defer srv.Close() defer srv.Close()
var resp *http.Response var resp *http.Response

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/log"
) )
/* /*

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long

@ -78,7 +78,6 @@ var landingPageTemplate = template.Must(template.New("landingPage").Parse(`
<meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0"> <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0">
<meta http-equiv="X-UA-Compatible" ww="chrome=1"> <meta http-equiv="X-UA-Compatible" ww="chrome=1">
<meta name="description" content="Ethereum/Swarm Landing page"> <meta name="description" content="Ethereum/Swarm Landing page">
<meta property="og:url" content="https://swarm-gateways.net/bzz:/theswarm.eth">
<style> <style>
body, div, header, footer { body, div, header, footer {
@ -206,7 +205,7 @@ var landingPageTemplate = template.Must(template.New("landingPage").Parse(`
<footer> <footer>
<p> <p>
Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution<br/> Swarm: Serverless Hosting Incentivised Peer-To-Peer Storage And Content Distribution<br/>
<a href="http://swarm-gateways.net/bzz:/theswarm.eth">Swarm</a> <a href="/bzz:/theswarm.eth">Swarm</a>
</p> </p>
</footer> </footer>

@ -24,16 +24,18 @@ import (
"io" "io"
"net/http" "net/http"
"strings" "strings"
"sync"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage" "github.com/ethereum/go-ethereum/swarm/storage"
) )
const ( const (
ManifestType = "application/bzz-manifest+json" ManifestType = "application/bzz-manifest+json"
ResourceContentType = "application/bzz-resource"
manifestSizeLimit = 5 * 1024 * 1024
) )
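Two additions here are worth calling out: ResourceContentType tags manifest entries that point at mutable resources (used by NewResourceManifest below), and manifestSizeLimit caps manifest size at 5 * 1024 * 1024 = 5,242,880 bytes, a limit that readManifest enforces before attempting to parse (see the check and the matching test further down).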
// Manifest represents a swarm manifest // Manifest represents a swarm manifest
@ -59,38 +61,58 @@ type ManifestList struct {
} }
// NewManifest creates and stores a new, empty manifest // NewManifest creates and stores a new, empty manifest
func (a *Api) NewManifest() (storage.Key, error) { func (a *API) NewManifest(toEncrypt bool) (storage.Address, error) {
var manifest Manifest var manifest Manifest
data, err := json.Marshal(&manifest) data, err := json.Marshal(&manifest)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return a.Store(bytes.NewReader(data), int64(len(data)), &sync.WaitGroup{}) key, wait, err := a.Store(bytes.NewReader(data), int64(len(data)), toEncrypt)
wait()
return key, err
}
// Manifest hack for supporting Mutable Resource Updates from the bzz: scheme
// see swarm/api/api.go:API.Get() for more information
func (a *API) NewResourceManifest(resourceAddr string) (storage.Address, error) {
var manifest Manifest
entry := ManifestEntry{
Hash: resourceAddr,
ContentType: ResourceContentType,
}
manifest.Entries = append(manifest.Entries, entry)
data, err := json.Marshal(&manifest)
if err != nil {
return nil, err
}
key, _, err := a.Store(bytes.NewReader(data), int64(len(data)), false)
return key, err
} }
// ManifestWriter is used to add and remove entries from an underlying manifest // ManifestWriter is used to add and remove entries from an underlying manifest
type ManifestWriter struct { type ManifestWriter struct {
api *Api api *API
trie *manifestTrie trie *manifestTrie
quitC chan bool quitC chan bool
} }
func (a *Api) NewManifestWriter(key storage.Key, quitC chan bool) (*ManifestWriter, error) { func (a *API) NewManifestWriter(addr storage.Address, quitC chan bool) (*ManifestWriter, error) {
trie, err := loadManifest(a.dpa, key, quitC) trie, err := loadManifest(a.fileStore, addr, quitC)
if err != nil { if err != nil {
return nil, fmt.Errorf("error loading manifest %s: %s", key, err) return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
} }
return &ManifestWriter{a, trie, quitC}, nil return &ManifestWriter{a, trie, quitC}, nil
} }
// AddEntry stores the given data and adds the resulting key to the manifest // AddEntry stores the given data and adds the resulting key to the manifest
func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Key, error) { func (m *ManifestWriter) AddEntry(data io.Reader, e *ManifestEntry) (storage.Address, error) {
key, err := m.api.Store(data, e.Size, nil)
key, _, err := m.api.Store(data, e.Size, m.trie.encrypted)
if err != nil { if err != nil {
return nil, err return nil, err
} }
entry := newManifestTrieEntry(e, nil) entry := newManifestTrieEntry(e, nil)
entry.Hash = key.String() entry.Hash = key.Hex()
m.trie.addEntry(entry, m.quitC) m.trie.addEntry(entry, m.quitC)
return key, nil return key, nil
} }
@ -102,29 +124,29 @@ func (m *ManifestWriter) RemoveEntry(path string) error {
} }
// Store stores the manifest, returning the resulting storage key // Store stores the manifest, returning the resulting storage key
func (m *ManifestWriter) Store() (storage.Key, error) { func (m *ManifestWriter) Store() (storage.Address, error) {
return m.trie.hash, m.trie.recalcAndStore() return m.trie.ref, m.trie.recalcAndStore()
} }
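A hedged sketch of the updated ManifestWriter round trip with storage.Address and Hex-encoded hashes (in-package pseudocode; root and data are placeholders, bytes import assumed):

```go
// addOneEntry is a hypothetical helper: open a writer on an existing
// manifest root, add one file entry, and store the recalculated trie.
func addOneEntry(a *API, root storage.Address, data []byte) (storage.Address, error) {
	mw, err := a.NewManifestWriter(root, nil)
	if err != nil {
		return nil, err
	}
	entry := &ManifestEntry{
		Path:        "hello.txt",
		ContentType: "text/plain",
		Size:        int64(len(data)),
	}
	if _, err := mw.AddEntry(bytes.NewReader(data), entry); err != nil {
		return nil, err
	}
	return mw.Store() // new root reference of the modified manifest
}
```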
// ManifestWalker is used to recursively walk the entries in the manifest and // ManifestWalker is used to recursively walk the entries in the manifest and
// all of its submanifests // all of its submanifests
type ManifestWalker struct { type ManifestWalker struct {
api *Api api *API
trie *manifestTrie trie *manifestTrie
quitC chan bool quitC chan bool
} }
func (a *Api) NewManifestWalker(key storage.Key, quitC chan bool) (*ManifestWalker, error) { func (a *API) NewManifestWalker(addr storage.Address, quitC chan bool) (*ManifestWalker, error) {
trie, err := loadManifest(a.dpa, key, quitC) trie, err := loadManifest(a.fileStore, addr, quitC)
if err != nil { if err != nil {
return nil, fmt.Errorf("error loading manifest %s: %s", key, err) return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
} }
return &ManifestWalker{a, trie, quitC}, nil return &ManifestWalker{a, trie, quitC}, nil
} }
// SkipManifest is used as a return value from WalkFn to indicate that the // ErrSkipManifest is used as a return value from WalkFn to indicate that the
// manifest should be skipped // manifest should be skipped
var SkipManifest = errors.New("skip this manifest") var ErrSkipManifest = errors.New("skip this manifest")
// WalkFn is the type of function called for each entry visited by a recursive // WalkFn is the type of function called for each entry visited by a recursive
// manifest walk // manifest walk
@ -144,7 +166,7 @@ func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn)
entry.Path = prefix + entry.Path entry.Path = prefix + entry.Path
err := walkFn(&entry.ManifestEntry) err := walkFn(&entry.ManifestEntry)
if err != nil { if err != nil {
if entry.ContentType == ManifestType && err == SkipManifest { if entry.ContentType == ManifestType && err == ErrSkipManifest {
continue continue
} }
return err return err
@ -163,9 +185,10 @@ func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn)
} }
type manifestTrie struct { type manifestTrie struct {
dpa *storage.DPA fileStore *storage.FileStore
entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry
hash storage.Key // if hash != nil, it is stored ref storage.Address // if ref != nil, it is stored
encrypted bool
} }
func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry { func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry {
@ -181,48 +204,55 @@ type manifestTrieEntry struct {
subtrie *manifestTrie subtrie *manifestTrie
} }
func loadManifest(dpa *storage.DPA, hash storage.Key, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand func loadManifest(fileStore *storage.FileStore, hash storage.Address, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
log.Trace("manifest lookup", "key", hash)
log.Trace(fmt.Sprintf("manifest lookup key: '%v'.", hash.Log())) // retrieve manifest via FileStore
// retrieve manifest via DPA manifestReader, isEncrypted := fileStore.Retrieve(hash)
manifestReader := dpa.Retrieve(hash) log.Trace("reader retrieved", "key", hash)
return readManifest(manifestReader, hash, dpa, quitC) return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC)
} }
func readManifest(manifestReader storage.LazySectionReader, hash storage.Key, dpa *storage.DPA, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand func readManifest(manifestReader storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
// TODO check size for oversized manifests // TODO check size for oversized manifests
size, err := manifestReader.Size(quitC) size, err := manifestReader.Size(quitC)
if err != nil { // size == 0 if err != nil { // size == 0
// can't determine size means we don't have the root chunk // can't determine size means we don't have the root chunk
log.Trace("manifest not found", "key", hash)
err = fmt.Errorf("Manifest not Found") err = fmt.Errorf("Manifest not Found")
return return
} }
if size > manifestSizeLimit {
log.Warn("manifest exceeds size limit", "key", hash, "size", size, "limit", manifestSizeLimit)
err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit)
return
}
manifestData := make([]byte, size) manifestData := make([]byte, size)
read, err := manifestReader.Read(manifestData) read, err := manifestReader.Read(manifestData)
if int64(read) < size { if int64(read) < size {
log.Trace(fmt.Sprintf("Manifest %v not found.", hash.Log())) log.Trace("manifest not found", "key", hash)
if err == nil { if err == nil {
err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size) err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
} }
return return
} }
log.Trace(fmt.Sprintf("Manifest %v retrieved", hash.Log())) log.Debug("manifest retrieved", "key", hash)
var man struct { var man struct {
Entries []*manifestTrieEntry `json:"entries"` Entries []*manifestTrieEntry `json:"entries"`
} }
err = json.Unmarshal(manifestData, &man) err = json.Unmarshal(manifestData, &man)
if err != nil { if err != nil {
err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err) err = fmt.Errorf("Manifest %v is malformed: %v", hash.Log(), err)
log.Trace(fmt.Sprintf("%v", err)) log.Trace("malformed manifest", "key", hash)
return return
} }
log.Trace(fmt.Sprintf("Manifest %v has %d entries.", hash.Log(), len(man.Entries))) log.Trace("manifest entries", "key", hash, "len", len(man.Entries))
trie = &manifestTrie{ trie = &manifestTrie{
dpa: dpa, fileStore: fileStore,
encrypted: isEncrypted,
} }
for _, entry := range man.Entries { for _, entry := range man.Entries {
trie.addEntry(entry, quitC) trie.addEntry(entry, quitC)
@ -230,18 +260,18 @@ func readManifest(manifestReader storage.LazySectionReader, hash storage.Key, dp
return return
} }
func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) { func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
self.hash = nil // trie modified, hash needs to be re-calculated on demand mt.ref = nil // trie modified, hash needs to be re-calculated on demand
if len(entry.Path) == 0 { if len(entry.Path) == 0 {
self.entries[256] = entry mt.entries[256] = entry
return return
} }
b := entry.Path[0] b := entry.Path[0]
oldentry := self.entries[b] oldentry := mt.entries[b]
if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) { if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) {
self.entries[b] = entry mt.entries[b] = entry
return return
} }
@ -251,7 +281,7 @@ func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
} }
if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) { if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) {
if self.loadSubTrie(oldentry, quitC) != nil { if mt.loadSubTrie(oldentry, quitC) != nil {
return return
} }
entry.Path = entry.Path[cpl:] entry.Path = entry.Path[cpl:]
@ -263,21 +293,22 @@ func (self *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) {
commonPrefix := entry.Path[:cpl] commonPrefix := entry.Path[:cpl]
subtrie := &manifestTrie{ subtrie := &manifestTrie{
dpa: self.dpa, fileStore: mt.fileStore,
encrypted: mt.encrypted,
} }
entry.Path = entry.Path[cpl:] entry.Path = entry.Path[cpl:]
oldentry.Path = oldentry.Path[cpl:] oldentry.Path = oldentry.Path[cpl:]
subtrie.addEntry(entry, quitC) subtrie.addEntry(entry, quitC)
subtrie.addEntry(oldentry, quitC) subtrie.addEntry(oldentry, quitC)
self.entries[b] = newManifestTrieEntry(&ManifestEntry{ mt.entries[b] = newManifestTrieEntry(&ManifestEntry{
Path: commonPrefix, Path: commonPrefix,
ContentType: ManifestType, ContentType: ManifestType,
}, subtrie) }, subtrie)
} }
func (self *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) { func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) {
for _, e := range self.entries { for _, e := range mt.entries {
if e != nil { if e != nil {
cnt++ cnt++
entry = e entry = e
@ -286,27 +317,27 @@ func (self *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) {
return return
} }
func (self *manifestTrie) deleteEntry(path string, quitC chan bool) { func (mt *manifestTrie) deleteEntry(path string, quitC chan bool) {
self.hash = nil // trie modified, hash needs to be re-calculated on demand mt.ref = nil // trie modified, hash needs to be re-calculated on demand
if len(path) == 0 { if len(path) == 0 {
self.entries[256] = nil mt.entries[256] = nil
return return
} }
b := path[0] b := path[0]
entry := self.entries[b] entry := mt.entries[b]
if entry == nil { if entry == nil {
return return
} }
if entry.Path == path { if entry.Path == path {
self.entries[b] = nil mt.entries[b] = nil
return return
} }
epl := len(entry.Path) epl := len(entry.Path)
if (entry.ContentType == ManifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) { if (entry.ContentType == ManifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) {
if self.loadSubTrie(entry, quitC) != nil { if mt.loadSubTrie(entry, quitC) != nil {
return return
} }
entry.subtrie.deleteEntry(path[epl:], quitC) entry.subtrie.deleteEntry(path[epl:], quitC)
@ -317,13 +348,13 @@ func (self *manifestTrie) deleteEntry(path string, quitC chan bool) {
if lastentry != nil { if lastentry != nil {
lastentry.Path = entry.Path + lastentry.Path lastentry.Path = entry.Path + lastentry.Path
} }
self.entries[b] = lastentry mt.entries[b] = lastentry
} }
} }
} }
func (self *manifestTrie) recalcAndStore() error { func (mt *manifestTrie) recalcAndStore() error {
if self.hash != nil { if mt.ref != nil {
return nil return nil
} }
@ -331,14 +362,14 @@ func (self *manifestTrie) recalcAndStore() error {
buffer.WriteString(`{"entries":[`) buffer.WriteString(`{"entries":[`)
list := &Manifest{} list := &Manifest{}
for _, entry := range self.entries { for _, entry := range mt.entries {
if entry != nil { if entry != nil {
if entry.Hash == "" { // TODO: paralellize if entry.Hash == "" { // TODO: paralellize
err := entry.subtrie.recalcAndStore() err := entry.subtrie.recalcAndStore()
if err != nil { if err != nil {
return err return err
} }
entry.Hash = entry.subtrie.hash.String() entry.Hash = entry.subtrie.ref.Hex()
} }
list.Entries = append(list.Entries, entry.ManifestEntry) list.Entries = append(list.Entries, entry.ManifestEntry)
} }
@ -351,23 +382,22 @@ func (self *manifestTrie) recalcAndStore() error {
} }
sr := bytes.NewReader(manifest) sr := bytes.NewReader(manifest)
wg := &sync.WaitGroup{} key, wait, err2 := mt.fileStore.Store(sr, int64(len(manifest)), mt.encrypted)
key, err2 := self.dpa.Store(sr, int64(len(manifest)), wg, nil) wait()
wg.Wait() mt.ref = key
self.hash = key
return err2 return err2
} }
func (self *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) { func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) {
if entry.subtrie == nil { if entry.subtrie == nil {
hash := common.Hex2Bytes(entry.Hash) hash := common.Hex2Bytes(entry.Hash)
entry.subtrie, err = loadManifest(self.dpa, hash, quitC) entry.subtrie, err = loadManifest(mt.fileStore, hash, quitC)
entry.Hash = "" // might not match, should be recalculated entry.Hash = "" // might not match, should be recalculated
} }
return return
} }
func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error { func (mt *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error {
plen := len(prefix) plen := len(prefix)
var start, stop int var start, stop int
if plen == 0 { if plen == 0 {
@ -384,7 +414,7 @@ func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool,
return fmt.Errorf("aborted") return fmt.Errorf("aborted")
default: default:
} }
entry := self.entries[i] entry := mt.entries[i]
if entry != nil { if entry != nil {
epl := len(entry.Path) epl := len(entry.Path)
if entry.ContentType == ManifestType { if entry.ContentType == ManifestType {
@ -393,7 +423,7 @@ func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool,
l = epl l = epl
} }
if prefix[:l] == entry.Path[:l] { if prefix[:l] == entry.Path[:l] {
err := self.loadSubTrie(entry, quitC) err := mt.loadSubTrie(entry, quitC)
if err != nil { if err != nil {
return err return err
} }
@ -412,23 +442,22 @@ func (self *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool,
return nil return nil
} }
func (self *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) { func (mt *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) {
return self.listWithPrefixInt(prefix, "", quitC, cb) return mt.listWithPrefixInt(prefix, "", quitC, cb)
} }
func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) { func (mt *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) {
log.Trace(fmt.Sprintf("findPrefixOf(%s)", path)) log.Trace(fmt.Sprintf("findPrefixOf(%s)", path))
if len(path) == 0 { if len(path) == 0 {
return self.entries[256], 0 return mt.entries[256], 0
} }
//see if first char is in manifest entries //see if first char is in manifest entries
b := path[0] b := path[0]
entry = self.entries[b] entry = mt.entries[b]
if entry == nil { if entry == nil {
return self.entries[256], 0 return mt.entries[256], 0
} }
epl := len(entry.Path) epl := len(entry.Path)
@ -436,7 +465,7 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
if len(path) <= epl { if len(path) <= epl {
if entry.Path[:len(path)] == path { if entry.Path[:len(path)] == path {
if entry.ContentType == ManifestType { if entry.ContentType == ManifestType {
err := self.loadSubTrie(entry, quitC) err := mt.loadSubTrie(entry, quitC)
if err == nil && entry.subtrie != nil { if err == nil && entry.subtrie != nil {
subentries := entry.subtrie.entries subentries := entry.subtrie.entries
for i := 0; i < len(subentries); i++ { for i := 0; i < len(subentries); i++ {
@ -457,7 +486,7 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType)) log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType))
//the subentry is a manifest, load subtrie //the subentry is a manifest, load subtrie
if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) { if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) {
err := self.loadSubTrie(entry, quitC) err := mt.loadSubTrie(entry, quitC)
if err != nil { if err != nil {
return nil, 0 return nil, 0
} }
@ -478,7 +507,7 @@ func (self *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *man
pos = epl pos = epl
} }
} }
return return nil, 0
} }
// file system manifest always contains regularized paths // file system manifest always contains regularized paths
@ -495,10 +524,10 @@ func RegularSlashes(path string) (res string) {
return return
} }
func (self *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) { func (mt *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) {
path := RegularSlashes(spath) path := RegularSlashes(spath)
var pos int var pos int
quitC := make(chan bool) quitC := make(chan bool)
entry, pos = self.findPrefixOf(path, quitC) entry, pos = mt.findPrefixOf(path, quitC)
return entry, path[:pos] return entry, path[:pos]
} }

@@ -42,7 +42,9 @@ func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
quitC := make(chan bool)
-trie, err := readManifest(manifest(paths...), nil, nil, quitC)
+fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ref := make([]byte, fileStore.HashSize())
+trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC)
if err != nil {
t.Errorf("unexpected error making manifest: %v", err)
}
@@ -97,7 +99,9 @@ func TestGetEntry(t *testing.T) {
func TestExactMatch(t *testing.T) {
quitC := make(chan bool)
mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
-trie, err := readManifest(mf, nil, nil, quitC)
+fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ref := make([]byte, fileStore.HashSize())
+trie, err := readManifest(mf, ref, fileStore, false, quitC)
if err != nil {
t.Errorf("unexpected error making manifest: %v", err)
}
@@ -128,7 +132,9 @@ func TestAddFileWithManifestPath(t *testing.T) {
reader := &storage.LazyTestSectionReader{
SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
}
-trie, err := readManifest(reader, nil, nil, nil)
+fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams())
+ref := make([]byte, fileStore.HashSize())
+trie, err := readManifest(reader, ref, fileStore, false, nil)
if err != nil {
t.Fatal(err)
}
@@ -144,3 +150,26 @@ func TestAddFileWithManifestPath(t *testing.T) {
checkEntry(t, "ac", "ac", false, trie)
checkEntry(t, "a", "a", false, trie)
}
// TestReadManifestOverSizeLimit creates a manifest reader with data longer than
// manifestSizeLimit and checks that the readManifest function returns the exact
// error message.
// The manifest data is not in json-encoded format, preventing possible
// successful parsing attempts if the limit check fails.
func TestReadManifestOverSizeLimit(t *testing.T) {
manifest := make([]byte, manifestSizeLimit+1)
reader := &storage.LazyTestSectionReader{
SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
}
_, err := readManifest(reader, storage.Address{}, nil, false, nil)
if err == nil {
t.Fatal("got no error from readManifest")
}
// Error message is part of the http response body
// which justifies exact string validation.
got := err.Error()
want := fmt.Sprintf("Manifest size of %v bytes exceeds the %v byte limit", len(manifest), manifestSizeLimit)
if got != want {
t.Fatalf("got error message %q, expected %q", got, want)
}
}

@@ -16,7 +16,11 @@
package api
-import "path"
+import (
+"path"
+
+"github.com/ethereum/go-ethereum/swarm/storage"
+)
type Response struct {
MimeType string
@@ -30,10 +34,10 @@ type Response struct {
//
// DEPRECATED: Use the HTTP API instead
type Storage struct {
-api *Api
+api *API
}
-func NewStorage(api *Api) *Storage {
+func NewStorage(api *API) *Storage {
return &Storage{api}
}
@@ -41,12 +45,8 @@ func NewStorage(api *Api) *Storage {
// its content type
//
// DEPRECATED: Use the HTTP API instead
-func (self *Storage) Put(content, contentType string) (string, error) {
-key, err := self.api.Put(content, contentType)
-if err != nil {
-return "", err
-}
-return key.String(), err
+func (s *Storage) Put(content, contentType string, toEncrypt bool) (storage.Address, func(), error) {
+return s.api.Put(content, contentType, toEncrypt)
}
// Get retrieves the content from bzzpath and reads the response in full
@@ -57,16 +57,16 @@ func (self *Storage) Put(content, contentType string) (string, error) {
// size is resp.Size
//
// DEPRECATED: Use the HTTP API instead
-func (self *Storage) Get(bzzpath string) (*Response, error) {
+func (s *Storage) Get(bzzpath string) (*Response, error) {
uri, err := Parse(path.Join("bzz:/", bzzpath))
if err != nil {
return nil, err
}
-key, err := self.api.Resolve(uri)
+addr, err := s.api.Resolve(uri)
if err != nil {
return nil, err
}
-reader, mimeType, status, err := self.api.Get(key, uri.Path)
+reader, mimeType, status, _, err := s.api.Get(addr, uri.Path)
if err != nil {
return nil, err
}
@@ -87,18 +87,18 @@ func (self *Storage) Get(bzzpath string) (*Response, error) {
// and merge it onto it, creating an entry with contentType (mime)
//
// DEPRECATED: Use the HTTP API instead
-func (self *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
+func (s *Storage) Modify(rootHash, path, contentHash, contentType string) (newRootHash string, err error) {
uri, err := Parse("bzz:/" + rootHash)
if err != nil {
return "", err
}
-key, err := self.api.Resolve(uri)
+addr, err := s.api.Resolve(uri)
if err != nil {
return "", err
}
-key, err = self.api.Modify(key, path, contentHash, contentType)
+addr, err = s.api.Modify(addr, path, contentHash, contentType)
if err != nil {
return "", err
}
-return key.String(), nil
+return addr.Hex(), nil
}
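A minimal sketch of the deprecated Storage round trip under these new signatures, assuming a configured *API value a (the values are illustrative, not part of the change):

	s := NewStorage(a)                                      // a is a configured *API
	addr, wait, err := s.Put("hello", "text/plain", false)  // false: store unencrypted
	if err == nil {
		wait()                        // block until the content is actually stored
		resp, _ := s.Get(addr.Hex())  // fetch it back via a bzz:/ path
		_ = resp.MimeType             // "text/plain"
	}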

@@ -20,22 +20,24 @@ import (
"testing"
)
-func testStorage(t *testing.T, f func(*Storage)) {
-testApi(t, func(api *Api) {
-f(NewStorage(api))
+func testStorage(t *testing.T, f func(*Storage, bool)) {
+testAPI(t, func(api *API, toEncrypt bool) {
+f(NewStorage(api), toEncrypt)
})
}
func TestStoragePutGet(t *testing.T) {
-testStorage(t, func(api *Storage) {
+testStorage(t, func(api *Storage, toEncrypt bool) {
content := "hello"
exp := expResponse(content, "text/plain", 0)
// exp := expResponse([]byte(content), "text/plain", 0)
-bzzhash, err := api.Put(content, exp.MimeType)
+bzzkey, wait, err := api.Put(content, exp.MimeType, toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
-// to check put against the Api#Get
+wait()
+bzzhash := bzzkey.Hex()
+// to check put against the API#Get
resp0 := testGet(t, api.api, bzzhash, "")
checkResponse(t, resp0, exp)

@@ -21,26 +21,26 @@ import (
)
type Control struct {
-api *Api
+api *API
hive *network.Hive
}
-func NewControl(api *Api, hive *network.Hive) *Control {
+func NewControl(api *API, hive *network.Hive) *Control {
return &Control{api, hive}
}
-func (self *Control) BlockNetworkRead(on bool) {
-self.hive.BlockNetworkRead(on)
-}
+//func (self *Control) BlockNetworkRead(on bool) {
+// self.hive.BlockNetworkRead(on)
+//}
+//
-func (self *Control) SyncEnabled(on bool) {
-self.hive.SyncEnabled(on)
-}
+//func (self *Control) SyncEnabled(on bool) {
+// self.hive.SyncEnabled(on)
+//}
+//
-func (self *Control) SwapEnabled(on bool) {
-self.hive.SwapEnabled(on)
-}
+//func (self *Control) SwapEnabled(on bool) {
+// self.hive.SwapEnabled(on)
+//}
+//
-func (self *Control) Hive() string {
-return self.hive.String()
+func (c *Control) Hive() string {
+return c.hive.String()
}

@@ -19,9 +19,17 @@ package api
import (
"fmt"
"net/url"
+"regexp"
"strings"
+
+"github.com/ethereum/go-ethereum/common"
+"github.com/ethereum/go-ethereum/swarm/storage"
)
+
+//matches hex swarm hashes
+// TODO: this is bad, the length of a hash should not be hardcoded here
+var hashMatcher = regexp.MustCompile("^([0-9A-Fa-f]{64})([0-9A-Fa-f]{64})?$")
+
// URI is a reference to content stored in swarm.
type URI struct {
// Scheme has one of the following values:
@@ -32,18 +40,15 @@ type URI struct {
// (address is not resolved)
// * bzz-list - list of all files contained in a swarm manifest
//
-// Deprecated Schemes:
-// * bzzr - raw swarm content
-// * bzzi - immutable URI of an entry in a swarm manifest
-// (address is not resolved)
-// * bzz-hash - hash of swarm content
-//
Scheme string
-// Addr is either a hexadecimal storage key or an address which
-// resolves to a storage key
+// Addr is either a hexadecimal storage address or an address which
+// resolves to a storage address
Addr string
+// addr stores the parsed storage address
+addr storage.Address
// Path is the path to the content within a swarm manifest
Path string
}
@@ -59,7 +64,6 @@ type URI struct {
// * <scheme>://<addr>/<path>
//
// with scheme one of bzz, bzz-raw, bzz-immutable, bzz-list or bzz-hash
-// or deprecated ones bzzr and bzzi
func Parse(rawuri string) (*URI, error) {
u, err := url.Parse(rawuri)
if err != nil {
@@ -69,7 +73,7 @@ func Parse(rawuri string) (*URI, error) {
// check the scheme is valid
switch uri.Scheme {
-case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzzr", "bzzi":
+case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzz-resource":
default:
return nil, fmt.Errorf("unknown scheme %q", u.Scheme)
}
@@ -91,6 +95,9 @@ func Parse(rawuri string) (*URI, error) {
}
return uri, nil
}
+
+func (u *URI) Resource() bool {
+return u.Scheme == "bzz-resource"
+}
func (u *URI) Raw() bool {
return u.Scheme == "bzz-raw"
@@ -104,14 +111,6 @@ func (u *URI) List() bool {
return u.Scheme == "bzz-list"
}
-func (u *URI) DeprecatedRaw() bool {
-return u.Scheme == "bzzr"
-}
-func (u *URI) DeprecatedImmutable() bool {
-return u.Scheme == "bzzi"
-}
func (u *URI) Hash() bool {
return u.Scheme == "bzz-hash"
}
@@ -119,3 +118,14 @@ func (u *URI) Hash() bool {
func (u *URI) String() string {
return u.Scheme + ":/" + u.Addr + "/" + u.Path
}
+
+func (u *URI) Address() storage.Address {
+if u.addr != nil {
+return u.addr
+}
+if hashMatcher.MatchString(u.Addr) {
+u.addr = common.Hex2Bytes(u.Addr)
+return u.addr
+}
+return nil
+}
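A short sketch of how Parse and Address fit together (the hash is the one used in the test table below; the path is illustrative):

	uri, _ := Parse("bzz-raw://4378d19c26590f1a818ed7d6a62c3809e149b0999cab5ce5f26233b3b423bf8c/some/path")
	uri.Raw()     // true
	uri.Address() // the 32-byte storage.Address decoded via hashMatcher and common.Hex2Bytes;
	              // for a non-hex Addr such as an ENS name it returns nil, and the
	              // caller must first resolve the name to a storage address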

@@ -17,8 +17,11 @@
package api
import (
+"bytes"
"reflect"
"testing"
+
+"github.com/ethereum/go-ethereum/swarm/storage"
)
func TestParseURI(t *testing.T) {
@@ -32,6 +35,8 @@ func TestParseURI(t *testing.T) {
expectHash bool
expectDeprecatedRaw bool
expectDeprecatedImmutable bool
+expectValidKey bool
+expectAddr storage.Address
}
tests := []test{
{
@@ -120,24 +125,17 @@
expectList: true,
},
{
-uri: "bzzr:",
-expectURI: &URI{Scheme: "bzzr"},
-expectDeprecatedRaw: true,
-},
-{
-uri: "bzzr:/",
-expectURI: &URI{Scheme: "bzzr"},
-expectDeprecatedRaw: true,
-},
-{
-uri: "bzzi:",
-expectURI: &URI{Scheme: "bzzi"},
-expectDeprecatedImmutable: true,
-},
-{
-uri: "bzzi:/",
-expectURI: &URI{Scheme: "bzzi"},
-expectDeprecatedImmutable: true,
+uri: "bzz-raw://4378d19c26590f1a818ed7d6a62c3809e149b0999cab5ce5f26233b3b423bf8c",
+expectURI: &URI{Scheme: "bzz-raw",
+Addr: "4378d19c26590f1a818ed7d6a62c3809e149b0999cab5ce5f26233b3b423bf8c",
+},
+expectValidKey: true,
+expectRaw: true,
+expectAddr: storage.Address{67, 120, 209, 156, 38, 89, 15, 26,
+129, 142, 215, 214, 166, 44, 56, 9,
+225, 73, 176, 153, 156, 171, 92, 229,
+242, 98, 51, 179, 180, 35, 191, 140,
+},
},
}
for _, x := range tests {
@@ -166,11 +164,14 @@
if actual.Hash() != x.expectHash {
t.Fatalf("expected %s hash to be %t, got %t", x.uri, x.expectHash, actual.Hash())
}
-if actual.DeprecatedRaw() != x.expectDeprecatedRaw {
-t.Fatalf("expected %s deprecated raw to be %t, got %t", x.uri, x.expectDeprecatedRaw, actual.DeprecatedRaw())
-}
-if actual.DeprecatedImmutable() != x.expectDeprecatedImmutable {
-t.Fatalf("expected %s deprecated immutable to be %t, got %t", x.uri, x.expectDeprecatedImmutable, actual.DeprecatedImmutable())
+if x.expectValidKey {
+if actual.Address() == nil {
+t.Fatalf("expected %s to return a valid key, got nil", x.uri)
+} else {
+if !bytes.Equal(x.expectAddr, actual.Address()) {
+t.Fatalf("expected %s to be decoded to %v", x.expectURI.Addr, x.expectAddr)
+}
+}
}
}
}

@@ -0,0 +1,543 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt provides a binary merkle tree implementation
package bmt
import (
"fmt"
"hash"
"strings"
"sync"
"sync/atomic"
)
/*
Binary Merkle Tree Hash is a hash function over arbitrary data chunks of limited size.
It is defined as the root hash of the binary merkle tree built over fixed size segments
of the underlying chunk using any base hash function (e.g. Keccak256 SHA3).
Chunks with data shorter than the fixed size are hashed as if they had zero padding.
BMT hash is used as the chunk hash function in swarm which in turn is the basis for the
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
The BMT is optimal for providing compact inclusion proofs, i.e. proving that a
segment is a substring of a chunk starting at a particular offset.
The size of the underlying segments is fixed to the size of the base hash (called the resolution
of the BMT hash); using Keccak256 SHA3 this is 32 bytes, the EVM word size to optimize for on-chain BMT verification,
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
Two implementations are provided:
* RefHasher is optimized for code simplicity and meant as a reference implementation
that is simple to understand
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
control structure to coordinate the concurrent routines
It implements the following interfaces
* standard golang hash.Hash
* SwarmHash
* io.Writer
* TODO: SegmentWriter
*/
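// A minimal usage sketch of the API defined below (the 8-byte span encoding
// mirrors the tests in bmt_test.go; this is an illustration, not part of the
// package itself):
//
//	pool := NewTreePool(sha3.NewKeccak256, SegmentCount, PoolSize)
//	h := New(pool)
//	span := make([]byte, 8)
//	binary.BigEndian.PutUint64(span, uint64(len(data)))
//	chunkHash := Hash(h, span, data) // = sha3(span, BMT(data))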
const (
// SegmentCount is the maximum number of segments of the underlying chunk
// Should be equal to max-chunk-data-size / hash-size
SegmentCount = 128
// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
// the maximum number of concurrent BMT hashing operations performed by the same hasher
PoolSize = 8
)
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
// implemented e.g. by sha3.NewKeccak256 (Keccak256 SHA3)
type BaseHasherFunc func() hash.Hash
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
// - implements the hash.Hash interface
// - reuses a pool of trees for amortised memory allocation and resource control
// - supports order-agnostic concurrent segment writes (TODO:)
// as well as sequential read and write
// - the same hasher instance must not be called concurrently on more than one chunk
// - the same hasher instance is synchronously reusable
// - Sum gives back the tree to the pool and is guaranteed to leave
// the tree and itself in a state reusable for hashing a new chunk
// - generates and verifies segment inclusion proofs (TODO:)
type Hasher struct {
pool *TreePool // BMT resource pool
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
}
// New creates a reusable Hasher
// implements the hash.Hash interface
// pulls a new tree from a resource pool for hashing each chunk
func New(p *TreePool) *Hasher {
return &Hasher{
pool: p,
}
}
// TreePool provides a pool of trees used as resources by Hasher
// a tree popped from the pool is guaranteed to have clean state
// for hashing a new chunk
type TreePool struct {
lock sync.Mutex
c chan *tree // the channel to obtain a resource from the pool
hasher BaseHasherFunc // base hasher to use for the BMT levels
SegmentSize int // size of leaf segments, stipulated to be = hash size
SegmentCount int // the number of segments on the base level of the BMT
Capacity int // pool capacity, controls concurrency
Depth int // depth of the bmt trees = ceil(log2(segmentCount))
Datalength int // the total length of the data (count * size)
count int // current count of (ever) allocated resources
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
}
// NewTreePool creates a tree pool with hasher, segment size, segment count and capacity
// on Hasher.getTree it reuses free trees or creates a new one if capacity is not reached
func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
// initialises the zerohashes lookup table
depth := calculateDepthFor(segmentCount)
segmentSize := hasher().Size()
zerohashes := make([][]byte, depth)
zeros := make([]byte, segmentSize)
zerohashes[0] = zeros
h := hasher()
for i := 1; i < depth; i++ {
h.Reset()
h.Write(zeros)
h.Write(zeros)
zeros = h.Sum(nil)
zerohashes[i] = zeros
}
return &TreePool{
c: make(chan *tree, capacity),
hasher: hasher,
SegmentSize: segmentSize,
SegmentCount: segmentCount,
Capacity: capacity,
Datalength: segmentCount * segmentSize,
Depth: depth,
zerohashes: zerohashes,
}
}
// Drain drains the pool until it has no more than n resources
func (p *TreePool) Drain(n int) {
p.lock.Lock()
defer p.lock.Unlock()
for len(p.c) > n {
<-p.c
p.count--
}
}
// reserve blocks until it returns an available tree
// it reuses free trees or creates a new one if capacity is not yet reached
// TODO: should use a context here
func (p *TreePool) reserve() *tree {
p.lock.Lock()
defer p.lock.Unlock()
var t *tree
if p.count == p.Capacity {
return <-p.c
}
select {
case t = <-p.c:
default:
t = newTree(p.SegmentSize, p.Depth)
p.count++
}
return t
}
// release gives back a tree to the pool.
// this tree is guaranteed to be in reusable state
func (p *TreePool) release(t *tree) {
p.c <- t // can never fail ...
}
// tree is a reusable control structure representing a BMT
// organised in a binary tree
// Hasher uses a TreePool to obtain a tree for each chunk hash
// the tree is 'locked' while not in the pool
type tree struct {
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
cur int // index of rightmost currently open segment
offset int // offset (cursor position) within currently open segment
segment []byte // the rightmost open segment (not complete)
section []byte // the rightmost open section (double segment)
depth int // number of levels
result chan []byte // result channel
hash []byte // to record the result
span []byte // The span of the data subsumed under the chunk
}
// node is a reusable segment hasher representing a node in a BMT
type node struct {
isLeft bool // whether it is left side of the parent double segment
parent *node // pointer to parent node in the BMT
state int32 // atomic increment impl concurrent boolean toggle
left, right []byte // this is where the content segment is set
}
// newNode constructs a segment hasher node in the BMT (used by newTree)
func newNode(index int, parent *node) *node {
return &node{
parent: parent,
isLeft: index%2 == 0,
}
}
// draw draws the BMT (badly)
func (t *tree) draw(hash []byte) string {
var left, right []string
var anc []*node
for i, n := range t.leaves {
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
if i%2 == 0 {
anc = append(anc, n.parent)
}
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
}
anc = t.leaves
var hashes [][]string
for l := 0; len(anc) > 0; l++ {
var nodes []*node
hash := []string{""}
for i, n := range anc {
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
if i%2 == 0 && n.parent != nil {
nodes = append(nodes, n.parent)
}
}
hash = append(hash, "")
hashes = append(hashes, hash)
anc = nodes
}
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
total := 60
del := " "
var rows []string
for i := len(hashes) - 1; i >= 0; i-- {
var textlen int
hash := hashes[i]
for _, s := range hash {
textlen += len(s)
}
if total < textlen {
total = textlen + len(hash)
}
delsize := (total - textlen) / (len(hash) - 1)
if delsize > len(del) {
delsize = len(del)
}
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
rows = append(rows, row)
}
rows = append(rows, strings.Join(left, " "))
rows = append(rows, strings.Join(right, " "))
return strings.Join(rows, "\n") + "\n"
}
// newTree initialises a tree by building up the nodes of a BMT
// - segment size is stipulated to be the size of the hash
func newTree(segmentSize, depth int) *tree {
n := newNode(0, nil)
prevlevel := []*node{n}
// iterate over levels and creates 2^(depth-level) nodes
count := 2
for level := depth - 2; level >= 0; level-- {
nodes := make([]*node, count)
for i := 0; i < count; i++ {
parent := prevlevel[i/2]
nodes[i] = newNode(i, parent)
}
prevlevel = nodes
count *= 2
}
// the datanode level is the nodes on the last level
return &tree{
leaves: prevlevel,
result: make(chan []byte, 1),
segment: make([]byte, segmentSize),
section: make([]byte, 2*segmentSize),
}
}
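// For example, newTree(32, 3) allocates a root, a middle level of 2 nodes and
// a leaf level of 4 nodes, so the resulting tree covers 4*2 = 8 segments of
// 32 bytes each.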
// methods needed by hash.Hash
// Size returns the size
func (h *Hasher) Size() int {
return h.pool.SegmentSize
}
// BlockSize returns the block size
func (h *Hasher) BlockSize() int {
return h.pool.SegmentSize
}
// Hash hashes the data and the span using the bmt hasher
func Hash(h *Hasher, span, data []byte) []byte {
h.ResetWithLength(span)
h.Write(data)
return h.Sum(nil)
}
// DataLength returns the maximum data size that is hashed by the hasher =
// segment count times segment size
func (h *Hasher) DataLength() int {
return h.pool.Datalength
}
// Sum returns the hash of the buffer
// following the hash.Hash interface, Sum appends the hash of the chunk to the
// byte slice given as argument and returns the result
// caller must make sure Sum is not called concurrently with Write, writeSection
// and WriteSegment (TODO:)
func (h *Hasher) Sum(b []byte) (r []byte) {
return h.sum(b, true, true)
}
// sum implements Sum taking parameters
// * if the tree is released right away
// * if sequential write is used (can read sections)
func (h *Hasher) sum(b []byte, release, section bool) (r []byte) {
t := h.bmt
h.finalise(section)
if t.offset > 0 { // get the last node (double segment)
// padding the segment with zero
copy(t.segment[t.offset:], h.pool.zerohashes[0])
}
if section {
if t.cur%2 == 1 {
// if just finished current segment, copy it to the right half of the chunk
copy(t.section[h.pool.SegmentSize:], t.segment)
} else {
// copy segment to front of section, zero pad the right half
copy(t.section, t.segment)
copy(t.section[h.pool.SegmentSize:], h.pool.zerohashes[0])
}
h.writeSection(t.cur, t.section)
} else {
// TODO: h.writeSegment(t.cur, t.segment)
panic("SegmentWriter not implemented")
}
bmtHash := <-t.result
span := t.span
if release {
h.releaseTree()
}
// sha3(span + BMT(pure_chunk))
if span == nil {
return bmtHash
}
bh := h.pool.hasher()
bh.Reset()
bh.Write(span)
bh.Write(bmtHash)
return bh.Sum(b)
}
// Hasher implements the SwarmHash interface
// Hasher implements the io.Writer interface
// Write fills the buffer to hash;
// it calls writeSection for every completed section (double segment)
func (h *Hasher) Write(b []byte) (int, error) {
l := len(b)
if l <= 0 {
return 0, nil
}
t := h.bmt
need := (h.pool.SegmentCount - t.cur) * h.pool.SegmentSize
if l < need {
need = l
}
// calculate the part still missing to complete the current open segment
rest := h.pool.SegmentSize - t.offset
if need < rest {
rest = need
}
copy(t.segment[t.offset:], b[:rest])
need -= rest
size := (t.offset + rest) % h.pool.SegmentSize
// read full segments and the last possibly partial segment
for need > 0 {
// push all finished chunks we read
if t.cur%2 == 0 {
copy(t.section, t.segment)
} else {
copy(t.section[h.pool.SegmentSize:], t.segment)
h.writeSection(t.cur, t.section)
}
size = h.pool.SegmentSize
if need < size {
size = need
}
copy(t.segment, b[rest:rest+size])
need -= size
rest += size
t.cur++
}
t.offset = size % h.pool.SegmentSize
return l, nil
}
// Reset needs to be called before writing to the hasher
func (h *Hasher) Reset() {
h.getTree()
}
// Hasher implements the SwarmHash interface
// ResetWithLength needs to be called before writing to the hasher
// the argument is supposed to be the byte slice binary representation of
// the length of the data subsumed under the hash, i.e., span
func (h *Hasher) ResetWithLength(span []byte) {
h.Reset()
h.bmt.span = span
}
// releaseTree gives back the tree to the pool whereby it unlocks;
// it resets the tree's cursor, offset, span and hash
func (h *Hasher) releaseTree() {
t := h.bmt
if t != nil {
t.cur = 0
t.offset = 0
t.span = nil
t.hash = nil
h.bmt = nil
h.pool.release(t)
}
}
// TODO: writeSegment writes the ith segment into the BMT tree
// func (h *Hasher) writeSegment(i int, s []byte) {
// go h.run(h.bmt.leaves[i/2], h.pool.hasher(), i%2 == 0, s)
// }
// writeSection writes the hash of the i/2-th section into the corresponding level 1 node of the BMT tree
func (h *Hasher) writeSection(i int, section []byte) {
n := h.bmt.leaves[i/2]
isLeft := n.isLeft
n = n.parent
bh := h.pool.hasher()
bh.Write(section)
go func() {
sum := bh.Sum(nil)
if n == nil {
h.bmt.result <- sum
return
}
h.run(n, bh, isLeft, sum)
}()
}
// run pushes the data to the node:
// if it is the first of the 2 sisters written, the routine returns;
// if it is the second, it calculates the hash and writes it
// to the parent node recursively
func (h *Hasher) run(n *node, bh hash.Hash, isLeft bool, s []byte) {
for {
if isLeft {
n.left = s
} else {
n.right = s
}
// the child-thread first arriving will quit
if n.toggle() {
return
}
// the second thread now can be sure both left and right children are written
// so it calculates the hash of left|right and takes it to the next level
bh.Reset()
bh.Write(n.left)
bh.Write(n.right)
s = bh.Sum(nil)
// at the root of the bmt just write the result to the result channel
if n.parent == nil {
h.bmt.result <- s
return
}
// otherwise iterate on parent
isLeft = n.isLeft
n = n.parent
}
}
// finalise follows the path from the final data segment up to the
// BMT root via parents;
// for unbalanced trees it fills in the missing right sister nodes using
// the pool's lookup table of BMT subtree root hashes for all-zero sections
func (h *Hasher) finalise(skip bool) {
t := h.bmt
isLeft := t.cur%2 == 0
n := t.leaves[t.cur/2]
for level := 0; n != nil; level++ {
// when the final segment's path is going via left child node
// we include an all-zero subtree hash for the right sister and toggle the node.
// when the path is going through right child node, nothing to do
if isLeft && !skip {
n.right = h.pool.zerohashes[level]
n.toggle()
}
skip = false
isLeft = n.isLeft
n = n.parent
}
}
// getTree obtains a BMT resource by reserving one from the pool
func (h *Hasher) getTree() *tree {
if h.bmt != nil {
return h.bmt
}
t := h.pool.reserve()
h.bmt = t
return t
}
// toggle implements an atomic bool toggle (a concurrent reusable 2-state object):
// an atomic AddInt32 taken mod 2 flips the state;
// it returns true if the toggler just put the node in the active/waiting state
func (n *node) toggle() bool {
return atomic.AddInt32(&n.state, 1)%2 == 1
}
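// For example: when the results for two sister nodes race to their parent, the
// first caller increments state to 1 (1%2 == 1, toggle returns true) and quits,
// while the second increments it to 2 (2%2 == 0, toggle returns false) and goes
// on to hash, certain that both children have been written; the even final
// state also leaves the node clean for its next reuse.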
func hashstr(b []byte) string {
end := len(b)
if end > 4 {
end = 4
}
return fmt.Sprintf("%x", b[:end])
}
// calculateDepthFor calculates the depth (number of levels) in the BMT tree
func calculateDepthFor(n int) (d int) {
c := 2
for ; c < n; c *= 2 {
d++
}
return d + 1
}
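// For example, calculateDepthFor(128) increments d six times while c doubles
// from 2 to 128 and returns 7: the number of node levels in a BMT whose 64 leaf
// nodes each cover two 32-byte segments of a 4096-byte chunk.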

@@ -0,0 +1,85 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bmt is a simple nonconcurrent reference implementation of the hash-size
// segment based Binary Merkle tree hash on arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// There are extra checks on reference hasher correctness in bmt_test.go
// * TestRefHasher
// * testBMTHasherCorrectness function
package bmt
import (
"hash"
)
// RefHasher is the non-optimized easy-to-read reference implementation of BMT
type RefHasher struct {
maxDataLength int // c * hashSize, where c = 2 ^ ceil(log2(count)), where count = ceil(length / hashSize)
sectionLength int // 2 * hashSize
hasher hash.Hash // base hash func (Keccak256 SHA3)
}
// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasherFunc, count int) *RefHasher {
h := hasher()
hashsize := h.Size()
c := 2
for ; c < count; c *= 2 {
}
return &RefHasher{
sectionLength: 2 * hashsize,
maxDataLength: c * hashsize,
hasher: h,
}
}
// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(data []byte) []byte {
// if data is shorter than the base length (maxDataLength), we provide padding with zeros
d := make([]byte, rh.maxDataLength)
length := len(data)
if length > rh.maxDataLength {
length = rh.maxDataLength
}
copy(d, data[:length])
return rh.hash(d, rh.maxDataLength)
}
// data has length maxDataLength = segmentSize * 2^k
// hash calls itself recursively on both halves of the given slice
// concatenates the results, and returns the hash of that
// if the length of d is 2 * segmentSize then just returns the hash of that section
func (rh *RefHasher) hash(data []byte, length int) []byte {
var section []byte
if length == rh.sectionLength {
// section contains two data segments (d)
section = data
} else {
// section contains hashes of left and right BMT subtrees
// to be calculated by calling hash recursively on left and right half of d
length /= 2
section = append(rh.hash(data[:length], length), rh.hash(data[length:], length)...)
}
rh.hasher.Reset()
rh.hasher.Write(section)
s := rh.hasher.Sum(nil)
return s
}
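// Worked example: with count = 4 and a 32-byte base hash (maxDataLength = 128),
// Hash zero-pads the input d to 128 bytes and hash(d, 128) recurses once per
// halving, returning H(H(d[:64]) || H(d[64:])); this is exactly the expectation
// hard-coded for segment counts [3,4] in TestRefHasher in bmt_test.go.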

@@ -0,0 +1,390 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bmt
import (
"bytes"
crand "crypto/rand"
"encoding/binary"
"fmt"
"io"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/crypto/sha3"
)
// the actual data length generated (could be longer than max datalength of the BMT)
const BufferSize = 4128
func sha3hash(data ...[]byte) []byte {
h := sha3.NewKeccak256()
for _, v := range data {
h.Write(v)
}
return h.Sum(nil)
}
// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// all data lengths between 0 and 256 bytes
func TestRefHasher(t *testing.T) {
// the test struct is used to specify the expected BMT hash for
// segment counts between from and to and lengths from 1 to datalength
type test struct {
from int
to int
expected func([]byte) []byte
}
var tests []*test
// all lengths in [0,64] should be:
//
// sha3hash(data)
//
tests = append(tests, &test{
from: 1,
to: 2,
expected: func(d []byte) []byte {
data := make([]byte, 64)
copy(data, d)
return sha3hash(data)
},
})
// all segment counts in [3,4] should be:
//
// sha3hash(
// sha3hash(data[:64])
// sha3hash(data[64:])
// )
//
tests = append(tests, &test{
from: 3,
to: 4,
expected: func(d []byte) []byte {
data := make([]byte, 128)
copy(data, d)
return sha3hash(sha3hash(data[:64]), sha3hash(data[64:]))
},
})
// all segmentCounts in [5,8] should be:
//
// sha3hash(
// sha3hash(
// sha3hash(data[:64])
// sha3hash(data[64:128])
// )
// sha3hash(
// sha3hash(data[128:192])
// sha3hash(data[192:])
// )
// )
//
tests = append(tests, &test{
from: 5,
to: 8,
expected: func(d []byte) []byte {
data := make([]byte, 256)
copy(data, d)
return sha3hash(sha3hash(sha3hash(data[:64]), sha3hash(data[64:128])), sha3hash(sha3hash(data[128:192]), sha3hash(data[192:])))
},
})
// run the tests
for _, x := range tests {
for segmentCount := x.from; segmentCount <= x.to; segmentCount++ {
for length := 1; length <= segmentCount*32; length++ {
t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
data := make([]byte, length)
if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF {
t.Fatal(err)
}
expected := x.expected(data)
actual := NewRefHasher(sha3.NewKeccak256, segmentCount).Hash(data)
if !bytes.Equal(actual, expected) {
t.Fatalf("expected %x, got %x", expected, actual)
}
})
}
}
}
}
func TestHasherCorrectness(t *testing.T) {
err := testHasher(testBaseHasher)
if err != nil {
t.Fatal(err)
}
}
func testHasher(f func(BaseHasherFunc, []byte, int, int) error) error {
data := newData(BufferSize)
hasher := sha3.NewKeccak256
size := hasher().Size()
counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128}
var err error
for _, count := range counts {
max := count * size
incr := 1
for n := 1; n <= max; n += incr {
err = f(hasher, data, n, count)
if err != nil {
return err
}
}
}
return nil
}
// Tests that the BMT hasher can be synchronously reused with poolsizes 1 and PoolSize
func TestHasherReuse(t *testing.T) {
t.Run(fmt.Sprintf("poolsize_%d", 1), func(t *testing.T) {
testHasherReuse(1, t)
})
t.Run(fmt.Sprintf("poolsize_%d", PoolSize), func(t *testing.T) {
testHasherReuse(PoolSize, t)
})
}
func testHasherReuse(poolsize int, t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
defer pool.Drain(0)
bmt := New(pool)
for i := 0; i < 100; i++ {
data := newData(BufferSize)
n := rand.Intn(bmt.DataLength())
err := testHasherCorrectness(bmt, hasher, data, n, SegmentCount)
if err != nil {
t.Fatal(err)
}
}
}
// Tests if pool can be cleanly reused even in concurrent use
func TestBMTHasherConcurrentUse(t *testing.T) {
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
defer pool.Drain(0)
cycles := 100
errc := make(chan error)
for i := 0; i < cycles; i++ {
go func() {
bmt := New(pool)
data := newData(BufferSize)
n := rand.Intn(bmt.DataLength())
errc <- testHasherCorrectness(bmt, hasher, data, n, 128)
}()
}
LOOP:
for {
select {
case <-time.NewTimer(5 * time.Second).C:
t.Fatal("timed out")
case err := <-errc:
if err != nil {
t.Fatal(err)
}
cycles--
if cycles == 0 {
break LOOP
}
}
}
}
// helper function that creates a tree pool and checks hasher correctness with it
func testBaseHasher(hasher BaseHasherFunc, d []byte, n, count int) error {
pool := NewTreePool(hasher, count, 1)
defer pool.Drain(0)
bmt := New(pool)
return testHasherCorrectness(bmt, hasher, d, n, count)
}
// helper function that compares reference and optimised implementations on
// correctness
func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, count int) (err error) {
span := make([]byte, 8)
if len(d) < n {
n = len(d)
}
binary.BigEndian.PutUint64(span, uint64(n))
data := d[:n]
rbmt := NewRefHasher(hasher, count)
exp := sha3hash(span, rbmt.Hash(data))
got := Hash(bmt, span, data)
if !bytes.Equal(got, exp) {
return fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
}
return err
}
func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) }
func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) }
func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) }
func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) }
func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) }
func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) }
func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) }
func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) }
func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) }
func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) }
func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) }
func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) }
func BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) }
func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) }
func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) }
func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) }
func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) }
func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) }
func BenchmarkBMTHasher_4k(t *testing.B) { benchmarkBMTHasher(4096, t) }
func BenchmarkBMTHasher_2k(t *testing.B) { benchmarkBMTHasher(4096/2, t) }
func BenchmarkBMTHasher_1k(t *testing.B) { benchmarkBMTHasher(4096/4, t) }
func BenchmarkBMTHasher_512b(t *testing.B) { benchmarkBMTHasher(4096/8, t) }
func BenchmarkBMTHasher_256b(t *testing.B) { benchmarkBMTHasher(4096/16, t) }
func BenchmarkBMTHasher_128b(t *testing.B) { benchmarkBMTHasher(4096/32, t) }
func BenchmarkBMTHasherNoPool_4k(t *testing.B) { benchmarkBMTHasherPool(1, 4096, t) }
func BenchmarkBMTHasherNoPool_2k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/2, t) }
func BenchmarkBMTHasherNoPool_1k(t *testing.B) { benchmarkBMTHasherPool(1, 4096/4, t) }
func BenchmarkBMTHasherNoPool_512b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/8, t) }
func BenchmarkBMTHasherNoPool_256b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/16, t) }
func BenchmarkBMTHasherNoPool_128b(t *testing.B) { benchmarkBMTHasherPool(1, 4096/32, t) }
func BenchmarkBMTHasherPool_4k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096, t) }
func BenchmarkBMTHasherPool_2k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/2, t) }
func BenchmarkBMTHasherPool_1k(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/4, t) }
func BenchmarkBMTHasherPool_512b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/8, t) }
func BenchmarkBMTHasherPool_256b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/16, t) }
func BenchmarkBMTHasherPool_128b(t *testing.B) { benchmarkBMTHasherPool(PoolSize, 4096/32, t) }
// benchmarks simple sha3 hash on chunks
func benchmarkSHA3(n int, t *testing.B) {
data := newData(n)
hasher := sha3.NewKeccak256
h := hasher()
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}
// benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
// spread over PoolSize goroutines that each reuse their base hasher
// the premise is that this is the minimum computation needed for a BMT
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(n int, t *testing.B) {
hasher := sha3.NewKeccak256
hashSize := hasher().Size()
data := newData(hashSize)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
count := int32((n-1)/hashSize + 1)
wg := sync.WaitGroup{}
wg.Add(PoolSize)
var i int32
for j := 0; j < PoolSize; j++ {
go func() {
defer wg.Done()
h := hasher()
for atomic.AddInt32(&i, 1) < count {
h.Reset()
h.Write(data)
h.Sum(nil)
}
}()
}
wg.Wait()
}
}
// benchmarks BMT Hasher
func benchmarkBMTHasher(n int, t *testing.B) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, PoolSize)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
bmt := New(pool)
Hash(bmt, nil, data)
}
}
// benchmarks 100 concurrent bmt hashes with pool capacity
func benchmarkBMTHasherPool(poolsize, n int, t *testing.B) {
data := newData(n)
hasher := sha3.NewKeccak256
pool := NewTreePool(hasher, SegmentCount, poolsize)
cycles := 100
t.ReportAllocs()
t.ResetTimer()
wg := sync.WaitGroup{}
for i := 0; i < t.N; i++ {
wg.Add(cycles)
for j := 0; j < cycles; j++ {
go func() {
defer wg.Done()
bmt := New(pool)
Hash(bmt, nil, data)
}()
}
wg.Wait()
}
}
// benchmarks the reference hasher
func benchmarkRefHasher(n int, t *testing.B) {
data := newData(n)
hasher := sha3.NewKeccak256
rbmt := NewRefHasher(hasher, 128)
t.ReportAllocs()
t.ResetTimer()
for i := 0; i < t.N; i++ {
rbmt.Hash(data)
}
}
func newData(bufferSize int) []byte {
data := make([]byte, bufferSize)
_, err := io.ReadFull(crand.Reader, data)
if err != nil {
panic(err.Error())
}
return data
}

@@ -25,6 +25,7 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
+"github.com/ethereum/go-ethereum/swarm/log"
"golang.org/x/net/context"
)
@@ -49,6 +50,7 @@ type SwarmDir struct {
}
func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir {
+log.Debug("swarmfs", "NewSwarmDir", fullpath)
newdir := &SwarmDir{
inode: NewInode(),
name: filepath.Base(fullpath),
@@ -62,6 +64,8 @@ func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir {
}
func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error {
+sd.lock.RLock()
+defer sd.lock.RUnlock()
a.Inode = sd.inode
a.Mode = os.ModeDir | 0700
a.Uid = uint32(os.Getuid())
@@ -70,7 +74,7 @@ func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error {
}
func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
+log.Debug("swarmfs", "Lookup", req.Name)
for _, n := range sd.files {
if n.name == req.Name {
return n, nil
@@ -85,6 +89,7 @@ func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *f
}
func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
+log.Debug("swarmfs ReadDirAll")
var children []fuse.Dirent
for _, file := range sd.files {
children = append(children, fuse.Dirent{Inode: file.inode, Type: fuse.DT_File, Name: file.name})
@@ -96,6 +101,7 @@ func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
}
func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
+log.Debug("swarmfs Create", "path", sd.path, "req.Name", req.Name)
newFile := NewSwarmFile(sd.path, req.Name, sd.mountInfo)
newFile.fileSize = 0 // 0 means the file is not in swarm yet, it was just created
@@ -108,6 +114,7 @@ func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *f
}
func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
+log.Debug("swarmfs Remove", "path", sd.path, "req.Name", req.Name)
if req.Dir && sd.directories != nil {
newDirs := []*SwarmDir{}
@@ -144,13 +151,11 @@ func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
}
func (sd *SwarmDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
-newDir := NewSwarmDir(req.Name, sd.mountInfo)
+log.Debug("swarmfs Mkdir", "path", sd.path, "req.Name", req.Name)
+newDir := NewSwarmDir(filepath.Join(sd.path, req.Name), sd.mountInfo)
sd.lock.Lock()
defer sd.lock.Unlock()
sd.directories = append(sd.directories, newDir)
return newDir, nil
}

@@ -26,7 +26,7 @@ import (
"bazil.org/fuse"
"bazil.org/fuse/fs"
-"github.com/ethereum/go-ethereum/log"
+"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"golang.org/x/net/context"
)
@@ -50,7 +50,7 @@ type SwarmFile struct {
inode uint64
name string
path string
-key storage.Key
+addr storage.Address
fileSize int64
reader storage.LazySectionReader
@@ -63,7 +63,7 @@ func NewSwarmFile(path, fname string, minfo *MountInfo) *SwarmFile {
inode: NewInode(),
name: fname,
path: path,
-key: nil,
+addr: nil,
fileSize: -1, // -1 means the file already exists in swarm; just get the size from swarm
reader: nil,
@@ -73,33 +73,38 @@
return newFile
}
-func (file *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
-a.Inode = file.inode
+func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
+log.Debug("swarmfs Attr", "path", sf.path)
+sf.lock.Lock()
+defer sf.lock.Unlock()
+a.Inode = sf.inode
//TODO: need to get permission as argument
a.Mode = 0700
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getegid())
-if file.fileSize == -1 {
-reader := file.mountInfo.swarmApi.Retrieve(file.key)
+if sf.fileSize == -1 {
+reader, _ := sf.mountInfo.swarmApi.Retrieve(sf.addr)
quitC := make(chan bool)
size, err := reader.Size(quitC)
if err != nil {
-log.Warn("Couldn't get size of file %s : %v", file.path, err)
+log.Error("Couldn't get size of file %s : %v", sf.path, err)
+return err
}
-file.fileSize = size
+sf.fileSize = size
+log.Trace("swarmfs Attr", "size", size)
+close(quitC)
}
-a.Size = uint64(file.fileSize)
+a.Size = uint64(sf.fileSize)
return nil
}
func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
+log.Debug("swarmfs Read", "path", sf.path, "req.String", req.String())
sf.lock.RLock()
defer sf.lock.RUnlock()
if sf.reader == nil {
-sf.reader = sf.mountInfo.swarmApi.Retrieve(sf.key)
+sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(sf.addr)
}
buf := make([]byte, req.Size)
n, err := sf.reader.ReadAt(buf, req.Offset)
@@ -108,26 +113,23 @@ func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse
}
resp.Data = buf[:n]
sf.reader = nil
return err
}
func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
+log.Debug("swarmfs Write", "path", sf.path, "req.String", req.String())
if sf.fileSize == 0 && req.Offset == 0 {
// A new file is created
err := addFileToSwarm(sf, req.Data, len(req.Data))
if err != nil {
return err
}
resp.Size = len(req.Data)
} else if req.Offset <= sf.fileSize {
totalSize := sf.fileSize + int64(len(req.Data))
if totalSize > MaxAppendFileSize {
-log.Warn("Append file size reached (%v) : (%v)", sf.fileSize, len(req.Data))
+log.Warn("swarmfs Append file size reached (%v) : (%v)", sf.fileSize, len(req.Data))
return errFileSizeMaxLimixReached
}
@@ -137,9 +139,8 @@ func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fu
}
resp.Size = len(req.Data)
} else {
-log.Warn("Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset)
+log.Warn("swarmfs Invalid write request size(%v) : off(%v)", sf.fileSize, req.Offset)
return errInvalidOffset
}
return nil
}

@@ -39,12 +39,12 @@ var (
)
type SwarmFS struct {
-swarmApi *api.Api
+swarmApi *api.API
activeMounts map[string]*MountInfo
swarmFsLock *sync.RWMutex
}
-func NewSwarmFS(api *api.Api) *SwarmFS {
+func NewSwarmFS(api *api.API) *SwarmFS {
swarmfsLock.Do(func() {
swarmfs = &SwarmFS{
swarmApi: api,

File diff suppressed because it is too large

@ -30,15 +30,16 @@ import (
"bazil.org/fuse" "bazil.org/fuse"
"bazil.org/fuse/fs" "bazil.org/fuse/fs"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/api" "github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/log"
) )
var ( var (
errEmptyMountPoint = errors.New("need non-empty mount point") errEmptyMountPoint = errors.New("need non-empty mount point")
errMaxMountCount = errors.New("max FUSE mount count reached") errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
errMountTimeout = errors.New("mount timeout") errMaxMountCount = errors.New("max FUSE mount count reached")
errAlreadyMounted = errors.New("mount point is already serving") errMountTimeout = errors.New("mount timeout")
errAlreadyMounted = errors.New("mount point is already serving")
) )
func isFUSEUnsupportedError(err error) bool { func isFUSEUnsupportedError(err error) bool {
@ -48,18 +49,20 @@ func isFUSEUnsupportedError(err error) bool {
return err == fuse.ErrOSXFUSENotFound return err == fuse.ErrOSXFUSENotFound
} }
// information about every active mount // MountInfo contains information about every active mount
type MountInfo struct { type MountInfo struct {
MountPoint string MountPoint string
StartManifest string StartManifest string
LatestManifest string LatestManifest string
rootDir *SwarmDir rootDir *SwarmDir
fuseConnection *fuse.Conn fuseConnection *fuse.Conn
swarmApi *api.Api swarmApi *api.API
lock *sync.RWMutex lock *sync.RWMutex
serveClose chan struct{}
} }
func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo { func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
newMountInfo := &MountInfo{ newMountInfo := &MountInfo{
MountPoint: mpoint, MountPoint: mpoint,
StartManifest: mhash, StartManifest: mhash,
@ -68,50 +71,57 @@ func NewMountInfo(mhash, mpoint string, sapi *api.Api) *MountInfo {
fuseConnection: nil, fuseConnection: nil,
swarmApi: sapi, swarmApi: sapi,
lock: &sync.RWMutex{}, lock: &sync.RWMutex{},
serveClose: make(chan struct{}),
} }
return newMountInfo return newMountInfo
} }
-func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
+func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
+	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
 	if mountpoint == "" {
 		return nil, errEmptyMountPoint
 	}
+	if !strings.HasPrefix(mountpoint, "/") {
+		return nil, errNoRelativeMountPoint
+	}
 	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
 	if err != nil {
 		return nil, err
 	}
+	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

-	self.swarmFsLock.Lock()
-	defer self.swarmFsLock.Unlock()
+	swarmfs.swarmFsLock.Lock()
+	defer swarmfs.swarmFsLock.Unlock()

-	noOfActiveMounts := len(self.activeMounts)
+	noOfActiveMounts := len(swarmfs.activeMounts)
+	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
 	if noOfActiveMounts >= maxFuseMounts {
 		return nil, errMaxMountCount
 	}

-	if _, ok := self.activeMounts[cleanedMountPoint]; ok {
+	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
 		return nil, errAlreadyMounted
 	}

-	log.Info(fmt.Sprintf("Attempting to mount %s ", cleanedMountPoint))
-	_, manifestEntryMap, err := self.swarmApi.BuildDirectoryTree(mhash, true)
+	log.Trace("swarmfs mount: getting manifest tree")
+	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(mhash, true)
 	if err != nil {
 		return nil, err
 	}

-	mi := NewMountInfo(mhash, cleanedMountPoint, self.swarmApi)
+	log.Trace("swarmfs mount: building mount info")
+	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

 	dirTree := map[string]*SwarmDir{}
 	rootDir := NewSwarmDir("/", mi)
-	dirTree["/"] = rootDir
+	log.Trace("swarmfs mount", "rootDir", rootDir)
 	mi.rootDir = rootDir

+	log.Trace("swarmfs mount: traversing manifest map")
 	for suffix, entry := range manifestEntryMap {
-		key := common.Hex2Bytes(entry.Hash)
+		addr := common.Hex2Bytes(entry.Hash)
 		fullpath := "/" + suffix
 		basepath := filepath.Dir(fullpath)
 		parentDir := rootDir
 		dirUntilNow := ""
 		paths := strings.Split(basepath, "/")
@ -128,105 +138,143 @@ func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
 			} else {
 				parentDir = dirTree[dirUntilNow]
 			}
 		}
 	}
 		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
-		thisFile.key = key
+		thisFile.addr = addr
 		parentDir.files = append(parentDir.files, thisFile)
 	}

 	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
 	if isFUSEUnsupportedError(err) {
-		log.Warn("Fuse not installed", "mountpoint", cleanedMountPoint, "err", err)
+		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
 		return nil, err
 	} else if err != nil {
 		fuse.Unmount(cleanedMountPoint)
-		log.Warn("Error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
+		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
 		return nil, err
 	}
 	mi.fuseConnection = fconn

 	serverr := make(chan error, 1)
 	go func() {
-		log.Info(fmt.Sprintf("Serving %s at %s", mhash, cleanedMountPoint))
+		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
 		filesys := &SwarmRoot{root: rootDir}
+		//start serving the actual file system; see note below
 		if err := fs.Serve(fconn, filesys); err != nil {
-			log.Warn(fmt.Sprintf("Could not Serve SwarmFileSystem error: %v", err))
+			log.Warn("swarmfs could not serve the requested hash", "error", err)
 			serverr <- err
 		}
+		mi.serveClose <- struct{}{}
 	}()

+	/*
+	   IMPORTANT NOTE: the fs.Serve function is blocking;
+	   Serve builds up the actual fuse file system by calling the
+	   Attr functions on each SwarmFile, creating the file inodes;
+	   specifically calling the swarm's LazySectionReader.Size() to set the file size.
+
+	   This can take some time, and it appears that if we access the fuse file system
+	   too early, we can bring the tests to deadlock. The assumption so far is that
+	   at this point, the fuse driver didn't finish initializing the file system.
+
+	   Accessing files too early not only deadlocks the tests, but locks access
+	   to the fuse file completely, resulting in blocked resources at OS system level.
+	   Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.
+
+	   The workaround so far is to wait some time to give the OS enough time to initialize
+	   the fuse file system. During tests, this seemed to address the issue.
+
+	   HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
+	   AND THE DEADLOCK MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
+	   (caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
+	*/
+	time.Sleep(2 * time.Second)
+
+	timer := time.NewTimer(mountTimeout)
+	defer timer.Stop()
 	// Check if the mount process has an error to report.
 	select {
-	case <-time.After(mountTimeout):
-		fuse.Unmount(cleanedMountPoint)
+	case <-timer.C:
+		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint, "err", err)
+		err := fuse.Unmount(cleanedMountPoint)
+		if err != nil {
+			return nil, err
+		}
 		return nil, errMountTimeout

 	case err := <-serverr:
-		fuse.Unmount(cleanedMountPoint)
-		log.Warn("Error serving swarm FUSE FS", "mountpoint", cleanedMountPoint, "err", err)
+		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
+		err = fuse.Unmount(cleanedMountPoint)
 		return nil, err

 	case <-fconn.Ready:
-		log.Info("Now serving swarm FUSE FS", "manifest", mhash, "mountpoint", cleanedMountPoint)
+		//this signals that the actual mount point from the fuse.Mount call is ready;
+		//it does not signal though that the file system from fs.Serve is actually fully built up
+		if err := fconn.MountError; err != nil {
+			log.Error("Mounting error from fuse driver: ", "err", err)
+			return nil, err
+		}
+		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
 	}

-	self.activeMounts[cleanedMountPoint] = mi
+	timer.Stop()
+	swarmfs.activeMounts[cleanedMountPoint] = mi
 	return mi, nil
 }
-func (self *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
-
-	self.swarmFsLock.Lock()
-	defer self.swarmFsLock.Unlock()
+func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
+	swarmfs.swarmFsLock.Lock()
+	defer swarmfs.swarmFsLock.Unlock()

 	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
 	if err != nil {
 		return nil, err
 	}

-	mountInfo := self.activeMounts[cleanedMountPoint]
+	mountInfo := swarmfs.activeMounts[cleanedMountPoint]

 	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
-		return nil, fmt.Errorf("%s is not mounted", cleanedMountPoint)
+		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
 	}

 	err = fuse.Unmount(cleanedMountPoint)
 	if err != nil {
 		err1 := externalUnmount(cleanedMountPoint)
 		if err1 != nil {
-			errStr := fmt.Sprintf("UnMount error: %v", err)
+			errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
 			log.Warn(errStr)
 			return nil, err1
 		}
 	}

-	mountInfo.fuseConnection.Close()
-	delete(self.activeMounts, cleanedMountPoint)
+	err = mountInfo.fuseConnection.Close()
+	if err != nil {
+		return nil, err
+	}
+	delete(swarmfs.activeMounts, cleanedMountPoint)

-	succString := fmt.Sprintf("UnMounting %v succeeded", cleanedMountPoint)
+	<-mountInfo.serveClose
+	succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
 	log.Info(succString)

 	return mountInfo, nil
 }
-func (self *SwarmFS) Listmounts() []*MountInfo {
-	self.swarmFsLock.RLock()
-	defer self.swarmFsLock.RUnlock()
-
-	rows := make([]*MountInfo, 0, len(self.activeMounts))
-	for _, mi := range self.activeMounts {
+func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
+	swarmfs.swarmFsLock.RLock()
+	defer swarmfs.swarmFsLock.RUnlock()
+	rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
+	for _, mi := range swarmfs.activeMounts {
 		rows = append(rows, mi)
 	}
 	return rows
 }

-func (self *SwarmFS) Stop() bool {
-	for mp := range self.activeMounts {
-		mountInfo := self.activeMounts[mp]
-		self.Unmount(mountInfo.MountPoint)
+func (swarmfs *SwarmFS) Stop() bool {
+	for mp := range swarmfs.activeMounts {
+		mountInfo := swarmfs.activeMounts[mp]
+		swarmfs.Unmount(mountInfo.MountPoint)
 	}
 	return true
 }
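
The fixed two-second sleep in Mount above is a blunt workaround. A hedged sketch of an alternative, not part of this change (probeMount, its retry step and deadline are invented here for illustration), would poll the mount point until the kernel answers a cheap syscall:

// Sketch only: wait until a stat on the mount point succeeds, bounded by a
// deadline, instead of sleeping a fixed two seconds. Note that a successful
// stat only shows the FUSE driver is attached; it does not prove that
// fs.Serve has finished building the inodes.
package fuse

import (
	"fmt"
	"os"
	"time"
)

func probeMount(mountpoint string, deadline time.Duration) error {
	end := time.Now().Add(deadline)
	for time.Now().Before(end) {
		if _, err := os.Stat(mountpoint); err == nil {
			return nil
		}
		time.Sleep(50 * time.Millisecond)
	}
	return fmt.Errorf("mount at %s not responsive", mountpoint)
}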

@ -24,7 +24,7 @@ import (
 	"os/exec"
 	"runtime"

-	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/swarm/log"
 )

 func externalUnmount(mountPoint string) error {
@ -38,11 +38,11 @@ func externalUnmount(mountPoint string) error {
 	// Try FUSE-specific commands if umount didn't work.
 	switch runtime.GOOS {
 	case "darwin":
-		return exec.CommandContext(ctx, "diskutil", "umount", "force", mountPoint).Run()
+		return exec.CommandContext(ctx, "diskutil", "umount", mountPoint).Run()
 	case "linux":
 		return exec.CommandContext(ctx, "fusermount", "-u", mountPoint).Run()
 	default:
-		return fmt.Errorf("unmount: unimplemented")
+		return fmt.Errorf("swarmfs unmount: unimplemented")
 	}
 }
@ -54,14 +54,14 @@ func addFileToSwarm(sf *SwarmFile, content []byte, size int) error {
 	sf.lock.Lock()
 	defer sf.lock.Unlock()
-	sf.key = fkey
+	sf.addr = fkey
 	sf.fileSize = int64(size)

 	sf.mountInfo.lock.Lock()
 	defer sf.mountInfo.lock.Unlock()
 	sf.mountInfo.LatestManifest = mhash

-	log.Info("Added new file:", "fname", sf.name, "New Manifest hash", mhash)
+	log.Info("swarmfs added new file:", "fname", sf.name, "new Manifest hash", mhash)
 	return nil
 }
@ -75,7 +75,7 @@ func removeFileFromSwarm(sf *SwarmFile) error {
 	defer sf.mountInfo.lock.Unlock()
 	sf.mountInfo.LatestManifest = mkey

-	log.Info("Removed file:", "fname", sf.name, "New Manifest hash", mkey)
+	log.Info("swarmfs removed file:", "fname", sf.name, "new Manifest hash", mkey)
 	return nil
 }
@ -102,20 +102,20 @@ func removeDirectoryFromSwarm(sd *SwarmDir) error {
 	}
 }

 func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error {
-	fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.key, offset, length, true)
+	fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true)
 	if err != nil {
 		return err
 	}

 	sf.lock.Lock()
 	defer sf.lock.Unlock()
-	sf.key = fkey
+	sf.addr = fkey
 	sf.fileSize = sf.fileSize + int64(len(content))

 	sf.mountInfo.lock.Lock()
 	defer sf.mountInfo.lock.Unlock()
 	sf.mountInfo.LatestManifest = mhash

-	log.Info("Appended file:", "fname", sf.name, "New Manifest hash", mhash)
+	log.Info("swarmfs appended file:", "fname", sf.name, "new Manifest hash", mhash)
 	return nil
 }

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,48 @@
package log
import (
l "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
const (
// CallDepth is set to 1 in order to influence the reported line number of
// the log message, skipping 1 stack frame for the call to l.Output()
CallDepth = 1
)
// Warn is a convenient alias for log.Warn with stats
func Warn(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("warn", nil).Inc(1)
l.Output(msg, l.LvlWarn, CallDepth, ctx...)
}
// Error is a convenient alias for log.Error with stats
func Error(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("error", nil).Inc(1)
l.Output(msg, l.LvlError, CallDepth, ctx...)
}
// Crit is a convenient alias for log.Crit with stats
func Crit(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("crit", nil).Inc(1)
l.Output(msg, l.LvlCrit, CallDepth, ctx...)
}
// Info is a convenient alias for log.Info with stats
func Info(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("info", nil).Inc(1)
l.Output(msg, l.LvlInfo, CallDepth, ctx...)
}
// Debug is a convenient alias for log.Debug with stats
func Debug(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("debug", nil).Inc(1)
l.Output(msg, l.LvlDebug, CallDepth, ctx...)
}
// Trace is a convenient alias for log.Trace with stats
func Trace(msg string, ctx ...interface{}) {
metrics.GetOrRegisterCounter("trace", nil).Inc(1)
l.Output(msg, l.LvlTrace, CallDepth, ctx...)
}
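
As a usage sketch (assuming the standard go-ethereum metrics registry; the counter names match the level names registered above), a caller gets normal contextual logging plus a per-level counter:

package main

import (
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
)

func main() {
	metrics.Enabled = true // the counters are no-ops unless metrics collection is on
	log.Warn("chunk delivery slow", "peer", "02a3f4")
	// the call above also incremented the "warn" counter in the default registry
	n := metrics.GetOrRegisterCounter("warn", nil).Count() // n == 1
	_ = n
}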

@ -20,9 +20,9 @@ import (
"time" "time"
"github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/log"
gethmetrics "github.com/ethereum/go-ethereum/metrics" gethmetrics "github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/influxdb" "github.com/ethereum/go-ethereum/metrics/influxdb"
"github.com/ethereum/go-ethereum/swarm/log"
"gopkg.in/urfave/cli.v1" "gopkg.in/urfave/cli.v1"
) )

@ -0,0 +1,92 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package multihash
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
)
const (
defaultMultihashLength = 32
defaultMultihashTypeCode = 0x1b
)
var (
multihashTypeCode uint8
MultihashLength = defaultMultihashLength
)
func init() {
multihashTypeCode = defaultMultihashTypeCode
MultihashLength = defaultMultihashLength
}
// check if valid swarm multihash
func isSwarmMultihashType(code uint8) bool {
return code == multihashTypeCode
}
// GetMultihashLength returns the digest length and the header length of the provided multihash
// It will fail if the multihash is not a valid swarm multihash
func GetMultihashLength(data []byte) (int, int, error) {
cursor := 0
typ, c := binary.Uvarint(data)
if c <= 0 {
return 0, 0, errors.New("unreadable hashtype field")
}
if !isSwarmMultihashType(uint8(typ)) {
return 0, 0, fmt.Errorf("hash code %x is not a swarm hashtype", typ)
}
cursor += c
hashlength, c := binary.Uvarint(data[cursor:])
if c <= 0 {
return 0, 0, errors.New("unreadable length field")
}
cursor += c
// we cheekily assume hashlength < maxint
inthashlength := int(hashlength)
if len(data[cursor:]) < inthashlength {
return 0, 0, errors.New("length mismatch")
}
return inthashlength, cursor, nil
}
// FromMultihash returns the digest portion of the multihash
// It will fail if the multihash is not a valid swarm multihash
func FromMultihash(data []byte) ([]byte, error) {
hashLength, _, err := GetMultihashLength(data)
if err != nil {
return nil, err
}
return data[len(data)-hashLength:], nil
}
// ToMultihash wraps the provided digest data with a swarm multihash header
func ToMultihash(hashData []byte) []byte {
buf := bytes.NewBuffer(nil)
b := make([]byte, 8)
c := binary.PutUvarint(b, uint64(multihashTypeCode))
buf.Write(b[:c])
c = binary.PutUvarint(b, uint64(len(hashData)))
buf.Write(b[:c])
buf.Write(hashData)
return buf.Bytes()
}
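
For orientation, a quick worked example of the byte layout: both header fields are uvarints, so for a 32-byte digest the header is exactly two bytes (0x1b for the type code, then 0x20 = 32 for the length), giving a 34-byte multihash. A round-trip sketch using the functions above:

package multihash_test

import (
	"bytes"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/multihash"
)

func TestRoundTripExample(t *testing.T) {
	digest := make([]byte, 32)          // some 32-byte swarm hash
	mh := multihash.ToMultihash(digest) // [0x1b, 0x20, digest...] = 34 bytes
	recovered, err := multihash.FromMultihash(mh)
	if err != nil || !bytes.Equal(recovered, digest) {
		t.Fatal("round-trip failed")
	}
}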

@ -0,0 +1,53 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package multihash
import (
"bytes"
"math/rand"
"testing"
)
// parse multihash, and check that invalid multihashes fail
func TestCheckMultihash(t *testing.T) {
hashbytes := make([]byte, 32)
c, err := rand.Read(hashbytes)
if err != nil {
t.Fatal(err)
} else if c < 32 {
t.Fatal("short read")
}
expected := ToMultihash(hashbytes)
l, hl, _ := GetMultihashLength(expected)
if l != 32 {
t.Fatalf("expected length %d, got %d", 32, l)
} else if hl != 2 {
t.Fatalf("expected header length %d, got %d", 2, hl)
}
if _, _, err := GetMultihashLength(expected[1:]); err == nil {
t.Fatal("expected failure on corrupt header")
}
if _, _, err := GetMultihashLength(expected[:len(expected)-2]); err == nil {
t.Fatal("expected failure on short content")
}
dh, _ := FromMultihash(expected)
if !bytes.Equal(dh, hashbytes) {
t.Fatalf("expected content hash %x, got %x", hashbytes, dh)
}
}

@ -0,0 +1,152 @@
## Streaming
Streaming is a new protocol of the swarm bzz bundle of protocols.
This protocol provides the basic logic for chunk-based data flow.
It implements simple retrieve requests and delivery using a priority queue.
A data exchange stream is a directional flow of chunks between peers.
The source of data chunks is the upstream peer, the receiver is the
downstream peer. Each streaming protocol defines an outgoing streamer
and an incoming streamer, the former installed on the upstream peer,
the latter on the downstream peer.
Subscribe on StreamerPeer launches an incoming streamer that sends
a subscribe msg upstream. The streamer on the upstream peer
handles the subscribe msg by installing the relevant outgoing streamer.
The modules then engage in a process of the upstream peer sending a sequence of hashes of
chunks downstream (OfferedHashesMsg). The downstream peer evaluates which hashes are needed
and gets them delivered by sending back a msg (WantedHashesMsg); a schematic sketch of this
roundtrip closes this section.
Historical syncing is supported -- currently not the right abstraction --
with state kept across sessions by saving a series of intervals after their last
batch actually arrived.
Live streaming is also supported, by starting the session from the first item
after the subscription.
Provable data exchange. In case a stream represents a swarm document's data layer
or higher level chunks, streaming up to a certain index is always provable. It saves on
sending intermediate chunks.
Using the streamer logic, various stream types are easy to implement:
* light node requests:
* url lookup with offset
* document download
* document upload
* syncing
* live session syncing
* historical syncing
* simple retrieve requests and deliveries
* mutable resource updates streams
* receipting for finger pointing
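
To make the offer/want roundtrip described above concrete, here is a schematic sketch of the two message shapes. This is illustrative only: the package name, field names and types are assumptions, not the actual wire format of the bzz stream protocol.

```go
package stream // hypothetical package name, for illustration only

// OfferedHashesMsg is sent downstream: a batch of chunk hashes the
// upstream peer can deliver for a given stream.
type OfferedHashesMsg struct {
	Stream   string // which stream the offer belongs to (e.g. a sync bin)
	From, To uint64 // index range covered by Hashes
	Hashes   []byte // concatenated 32-byte chunk hashes
}

// WantedHashesMsg is the reply: a bitvector selecting the needed chunks
// and the next range the downstream peer asks for.
type WantedHashesMsg struct {
	Stream   string
	Want     []byte // bitvector: bit i set means chunk i of the offer is needed
	From, To uint64
}
```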
## Syncing
Syncing is the process that makes sure storer nodes end up storing all and only the chunks that are requested from them.
### Requirements
- eventual consistency: each historical chunk should be syncable
- since the same chunk can and will arrive from many peers, network traffic should be
optimised (only one transfer of data per chunk)
- explicit request deliveries should be prioritised higher than recent chunks received
during the ongoing session, which in turn should be higher than historical chunks.
- insured chunks should get receipted for finger pointing litigation; the receipts storage
should be organised efficiently, and the upstream peer should be able to find these
receipts for a deleted chunk easily to refute a challenge.
- syncing should be resilient to cut connections; metadata should be persisted that
keeps track of syncing state across sessions, and historical syncing state should survive restart
- extra data structures to support syncing should be kept at a minimum
- syncing is organised separately per chunk type (resource update vs content chunk)
- various types of streams should have common logic abstracted
Syncing is now entirely mediated by the localstore, i.e., there are no processes or memory leaks due to network contention.
When a new chunk is stored, its chunk hash is indexed by proximity bin.
Peers synchronise by getting the chunks closer to the downstream peer than to the upstream one.
Consequently, peers just sync all stored items for the kad bin the receiving peer falls into.
The special case of nearest neighbour sets is handled by the downstream peer
indicating they want to sync all kademlia bins with proximity equal to or higher
than their depth.
This sync state represents the initial state of a sync connection session.
Retrieval is dictated by downstream peers simply using a special streamer protocol.
Syncing chunks created during the session by the upstream peer is called live session syncing
while syncing of earlier chunks is historical syncing.
Once the relevant chunk batch is retrieved, the downstream peer looks up all hash segments in its localstore
and sends the upstream peer a message with a bitvector indicating which chunks are
missing, i.e., the new items. In turn the upstream peer sends the relevant chunk data alongside their index.
On sending chunks there is a priority queue system. If, while looking up hashes in its localstore,
the downstream peer hits an open request, a retrieve request is sent immediately to the upstream peer indicating
that no extra round of checks is needed. If another peer's syncer hits the same open request, it is slightly unsafe not to ask
that peer too: if the first one disconnects before delivering, or fails to deliver and therefore gets
disconnected, we should still be able to continue with the other. The minimal redundant traffic coming from such simultaneous
eventualities should be sufficiently rare not to warrant more complex treatment.
Session syncing involves the downstream peer requesting a new state on a bin from the upstream peer.
Using the new state, the range (of chunks) between the previous state and the new one is retrieved
and chunks are requested identically to the historical case. After receiving all the missing chunks
from the new hashes, the downstream peer will request a new range. If this happens before the upstream peer updates a new state,
we say that session syncing is live, or that the two peers are in sync. In general, the time interval passed since the downstream peer's request up to the current session cursor is a good indication of a permanent (probably increasing) lag.
If there is no historical backlog, and the downstream peer has an acceptable 'last synced' tag, then it is said to be fully synced with the upstream peer.
If a peer is fully synced with all its storer peers, it can advertise itself as globally fully synced.
The downstream peer persists the record of the last synced offset. When the two peers disconnect and
reconnect, syncing can start from there.
This situation however can also happen while historical syncing is not yet complete.
Effectively this means that the peer needs to persist a record of an arbitrary array of offset ranges covered (a sketch of such a record follows).
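
A minimal sketch of such a persisted record; the type name and merge rule are invented here for illustration, the real bookkeeping lives in the syncer state:

```go
// Intervals records which index ranges of a stream have already been
// covered, so syncing can resume across disconnects and restarts.
type Intervals struct {
	Ranges [][2]uint64 // sorted, non-overlapping [from, to] ranges
}

// Add merges a newly completed range into the record.
func (iv *Intervals) Add(from, to uint64) {
	iv.Ranges = append(iv.Ranges, [2]uint64{from, to})
	// a real implementation would sort and merge adjacent or overlapping
	// ranges here, then persist the result
}
```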
### Delivery requests
Once the appropriate ranges of the hashstream are retrieved and buffered, the downstream peer just scans the hashes, looks them up in the localstore and, if not found, creates a request entry.
The range is referenced by the chunk index. Alongside the name (indicating the stream, e.g., content chunks for bin 6) and the range,
the downstream peer sends a 128-long bitvector indicating which chunks are needed (see the sketch at the end of this subsection).
Newly created requests are bound together in a waitgroup which, when done, prompts sending the next one.
To be able to do checking and storage concurrently, we keep a buffer of one, so we start with two batches of hashes.
If there is nothing to give, the upstream peer's SetNextBatch blocks. Subscription ends with an unsubscribe, which removes the syncer from the map.
Cancelling requests (for instance the late chunks of an erasure batch) should be a chan closed
on the request.
A simple retrieve request is also a subscription.
Different streaming protocols are different p2p protocols with the same message types;
the constructor is the Run function itself, which takes a streamerpeer as argument.
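
As a sketch of the bitvector step above, using the swarm/network/bitvector package added in this changeset (the ChunkStore interface and the localstore lookup are paraphrased assumptions):

```go
// wantedBits builds the want-bitvector for a batch of offered hashes:
// a bit is set for every chunk we do not already have locally.
func wantedBits(hashes [][]byte, localstore ChunkStore) ([]byte, error) {
	bv, err := bitvector.New(len(hashes))
	if err != nil {
		return nil, err
	}
	for i, h := range hashes {
		if _, err := localstore.Get(h); err != nil { // not found locally
			bv.Set(i, true) // we want chunk i
		}
	}
	return bv.Bytes(), nil // sent back in the WantedHashesMsg
}
```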
### provable streams
The swarm hash over the hash stream has many advantages. It implements a provable data transfer
and provides efficient storage for receipts in the form of inclusion proofs usable for finger pointing litigation.
When challenged on a missing chunk, the upstream peer will provide an inclusion proof of a chunk hash against the state of the
sync stream. In order to be able to generate such an inclusion proof, the upstream peer needs to store the hash index (counting consecutive hash-size segments) alongside the chunk data and preserve it even when the chunk data is deleted, until the chunk is no longer insured.
If there is no valid insurance on the files, the entry may be deleted.
As long as the chunk is preserved, no takeover proof will be needed since the node can respond to any challenge.
However, once the node needs to delete an insured chunk for capacity reasons, a receipt should be available to
refute the challenge by finger pointing to a downstream peer.
As part of the deletion protocol then, hashes of insured chunks to be removed are pushed to an infinite stream for every bin.
Downstream peer on the other hand needs to make sure that they can only be finger pointed about a chunk they did receive and store.
For this, the check of a state should be exhaustive. If historical syncing finishes on one state, all hashes before are covered, no
surprises. In other words, historical syncing is self-verifying. With session syncing, however, it is not enough to check going back over the range from the old offset to the new one. Continuity (i.e., that the new state is an extension of the old) needs to be verified: after the downstream peer reads the range into a buffer, it appends the buffer to the last known state at the last known offset and verifies that the resulting hash matches
the latest state. Past intervals of historical syncing are checked via the session root.
The upstream peer signs the states, which downstream peers can use as handover proofs.
Downstream peers sign off on a state together with an initial offset.
Once historical syncing is complete and the session does not lag, the downstream peer only preserves the latest upstream state and stores the signed version.
The upstream peer needs to keep the latest takeover states: each deleted chunk's hash should be covered by a takeover proof of at least one peer. If historical syncing is complete, the upstream peer typically will store only the latest takeover proof from the downstream peer.
Crucially, the structure is totally independent of the number of peers in the bin, so it scales extremely well.
## implementation
The simplest protocol just involves the upstream peer prefixing the key with the kademlia proximity order (say 0-15 or 0-31)
and simply iterating on the index per bin when syncing with a peer.
Priority queues are used for sending chunks, so that user-triggered requests are responded to first, session syncing second, and historical syncing with lower priority, as sketched below.
The request on chunks remains implemented as a dataless entry in the memory store.
The lifecycle of this object should be more carefully thought through, i.e., when it fails to retrieve, it should be removed.
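
The three-level send queue could look like the following. This is a sketch against the swarm/network/priorityqueue package added in this changeset; the priority constants, the ctx variable and the sendChunk consumer are assumptions, not the package's actual API surface.

```go
// Sketch: 3 priorities (0 = historical, 1 = session, 2 = user requests),
// each with a buffer of 128 pending sends; Run consumes in priority order.
pq := priorityqueue.New(3, 128)
go pq.Run(ctx, func(v interface{}) {
	sendChunk(v) // delivery to the peer; sendChunk is assumed
})

pq.Push(ctx, retrieveReply, 2) // user-triggered retrieve: highest
pq.Push(ctx, sessionChunk, 1)  // live session syncing
pq.Push(ctx, historyChunk, 0)  // historical syncing: lowest
```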

@ -0,0 +1,66 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bitvector
import (
"errors"
)
var errInvalidLength = errors.New("invalid length")
type BitVector struct {
len int
b []byte
}
func New(l int) (bv *BitVector, err error) {
return NewFromBytes(make([]byte, l/8+1), l)
}
func NewFromBytes(b []byte, l int) (bv *BitVector, err error) {
if l <= 0 {
return nil, errInvalidLength
}
if len(b)*8 < l {
return nil, errInvalidLength
}
return &BitVector{
len: l,
b: b,
}, nil
}
func (bv *BitVector) Get(i int) bool {
bi := i / 8
return bv.b[bi]&(0x1<<uint(i%8)) != 0
}
func (bv *BitVector) Set(i int, v bool) {
bi := i / 8
cv := bv.Get(i)
if cv != v {
bv.b[bi] ^= 0x1 << uint8(i%8)
}
}
func (bv *BitVector) Bytes() []byte {
return bv.b
}
func (bv *BitVector) Length() int {
return bv.len
}

@ -0,0 +1,104 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bitvector
import "testing"
func TestBitvectorNew(t *testing.T) {
_, err := New(0)
if err != errInvalidLength {
t.Errorf("expected err %v, got %v", errInvalidLength, err)
}
_, err = NewFromBytes(nil, 0)
if err != errInvalidLength {
t.Errorf("expected err %v, got %v", errInvalidLength, err)
}
_, err = NewFromBytes([]byte{0}, 9)
if err != errInvalidLength {
t.Errorf("expected err %v, got %v", errInvalidLength, err)
}
_, err = NewFromBytes(make([]byte, 8), 8)
if err != nil {
t.Error(err)
}
}
func TestBitvectorGetSet(t *testing.T) {
for _, length := range []int{
1,
2,
4,
8,
9,
15,
16,
} {
bv, err := New(length)
if err != nil {
t.Errorf("error for length %v: %v", length, err)
}
for i := 0; i < length; i++ {
if bv.Get(i) {
t.Errorf("expected false for element on index %v", i)
}
}
func() {
defer func() {
if err := recover(); err == nil {
t.Errorf("expecting panic")
}
}()
bv.Get(length + 8)
}()
for i := 0; i < length; i++ {
bv.Set(i, true)
for j := 0; j < length; j++ {
if j == i {
if !bv.Get(j) {
t.Errorf("element on index %v is not set to true", i)
}
} else {
if bv.Get(j) {
t.Errorf("element on index %v is not false", i)
}
}
}
bv.Set(i, false)
if bv.Get(i) {
t.Errorf("element on index %v is not set to false", i)
}
}
}
}
func TestBitvectorNewFromBytesGet(t *testing.T) {
bv, err := NewFromBytes([]byte{8}, 8)
if err != nil {
t.Error(err)
}
if !bv.Get(3) {
t.Fatalf("element 3 is not set to true: state %08b", bv.b[0])
}
}

@ -0,0 +1,30 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"fmt"
"strings"
)
func LogAddrs(nns [][]byte) string {
var nnsa []string
for _, nn := range nns {
nnsa = append(nnsa, fmt.Sprintf("%08x", nn[:4]))
}
return strings.Join(nnsa, ", ")
}

@ -1,232 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"bytes"
"encoding/binary"
"fmt"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/storage"
)
//metrics variables
var (
syncReceiveCount = metrics.NewRegisteredCounter("network.sync.recv.count", nil)
syncReceiveIgnore = metrics.NewRegisteredCounter("network.sync.recv.ignore", nil)
syncSendCount = metrics.NewRegisteredCounter("network.sync.send.count", nil)
syncSendRefused = metrics.NewRegisteredCounter("network.sync.send.refused", nil)
syncSendNotFound = metrics.NewRegisteredCounter("network.sync.send.notfound", nil)
)
// Handler for storage/retrieval related protocol requests
// implements the StorageHandler interface used by the bzz protocol
type Depo struct {
hashfunc storage.SwarmHasher
localStore storage.ChunkStore
netStore storage.ChunkStore
}
func NewDepo(hash storage.SwarmHasher, localStore, remoteStore storage.ChunkStore) *Depo {
return &Depo{
hashfunc: hash,
localStore: localStore,
netStore: remoteStore, // entrypoint internal
}
}
// Handles UnsyncedKeysMsg after msg decoding - unsynced hashes upto sync state
// * the remote sync state is just stored and handled in protocol
// * filters through the new syncRequests and send the ones missing
// * back immediately as a deliveryRequest message
// * empty message just pings back for more (is this needed?)
// * strict signed sync states may be needed.
func (self *Depo) HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error {
unsynced := req.Unsynced
var missing []*syncRequest
var chunk *storage.Chunk
var err error
for _, req := range unsynced {
// skip keys that are found,
chunk, err = self.localStore.Get(req.Key[:])
if err != nil || chunk.SData == nil {
missing = append(missing, req)
}
}
log.Debug(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v unsynced keys: %v missing. new state: %v", len(unsynced), len(missing), req.State))
log.Trace(fmt.Sprintf("Depo.HandleUnsyncedKeysMsg: received %v", unsynced))
// send delivery request with missing keys
err = p.deliveryRequest(missing)
if err != nil {
return err
}
// set peers state to persist
p.syncState = req.State
return nil
}
// Handles deliveryRequestMsg
// * serves actual chunks asked by the remote peer
// by pushing to the delivery queue (sync db) of the correct priority
// (remote peer is free to reprioritize)
// * the message implies remote peer wants more, so trigger for
// * new outgoing unsynced keys message is fired
func (self *Depo) HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error {
deliver := req.Deliver
// queue the actual delivery of a chunk ()
log.Trace(fmt.Sprintf("Depo.HandleDeliveryRequestMsg: received %v delivery requests: %v", len(deliver), deliver))
for _, sreq := range deliver {
// TODO: look up in cache here or in deliveries
// priorities are taken from the message so the remote party can
// reprioritise to at their leisure
// r = self.pullCached(sreq.Key) // pulls and deletes from cache
Push(p, sreq.Key, sreq.Priority)
}
// sends it out as unsyncedKeysMsg
p.syncer.sendUnsyncedKeys()
return nil
}
// the entrypoint for store requests coming from the bzz wire protocol
// if key found locally, return. otherwise
// remote is untrusted, so hash is verified and chunk passed on to NetStore
func (self *Depo) HandleStoreRequestMsg(req *storeRequestMsgData, p *peer) {
var islocal bool
req.from = p
chunk, err := self.localStore.Get(req.Key)
switch {
case err != nil:
log.Trace(fmt.Sprintf("Depo.handleStoreRequest: %v not found locally. create new chunk/request", req.Key))
// not found in memory cache, ie., a genuine store request
// create chunk
syncReceiveCount.Inc(1)
chunk = storage.NewChunk(req.Key, nil)
case chunk.SData == nil:
// found chunk in memory store, needs the data, validate now
log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v. request entry found", req))
default:
// data is found, store request ignored
// this should update access count?
syncReceiveIgnore.Inc(1)
log.Trace(fmt.Sprintf("Depo.HandleStoreRequest: %v found locally. ignore.", req))
islocal = true
//return
}
hasher := self.hashfunc()
hasher.Write(req.SData)
if !bytes.Equal(hasher.Sum(nil), req.Key) {
// data does not validate, ignore
// TODO: peer should be penalised/dropped?
log.Warn(fmt.Sprintf("Depo.HandleStoreRequest: chunk invalid. store request ignored: %v", req))
return
}
if islocal {
return
}
// update chunk with size and data
chunk.SData = req.SData // protocol validates that SData is minimum 9 bytes long (int64 size + at least one byte of data)
chunk.Size = int64(binary.LittleEndian.Uint64(req.SData[0:8]))
log.Trace(fmt.Sprintf("delivery of %v from %v", chunk, p))
chunk.Source = p
self.netStore.Put(chunk)
}
// entrypoint for retrieve requests coming from the bzz wire protocol
// checks swap balance - return if peer has no credit
func (self *Depo) HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer) {
req.from = p
// swap - record credit for 1 request
// note that only charge actual reqsearches
var err error
if p.swap != nil {
err = p.swap.Add(1)
}
if err != nil {
log.Warn(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - cannot process request: %v", req.Key.Log(), err))
return
}
// call storage.NetStore#Get which
// blocks until local retrieval finished
// launches cloud retrieval
chunk, _ := self.netStore.Get(req.Key)
req = self.strategyUpdateRequest(chunk.Req, req)
// check if we can immediately deliver
if chunk.SData != nil {
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, delivering...", req.Key.Log()))
if req.MaxSize == 0 || int64(req.MaxSize) >= chunk.Size {
sreq := &storeRequestMsgData{
Id: req.Id,
Key: chunk.Key,
SData: chunk.SData,
requestTimeout: req.timeout, //
}
syncSendCount.Inc(1)
p.syncer.addRequest(sreq, DeliverReq)
} else {
syncSendRefused.Inc(1)
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content found, not wanted", req.Key.Log()))
}
} else {
syncSendNotFound.Inc(1)
log.Trace(fmt.Sprintf("Depo.HandleRetrieveRequest: %v - content not found locally. asked swarm for help. will get back", req.Key.Log()))
}
}
// add peer request the chunk and decides the timeout for the response if still searching
func (self *Depo) strategyUpdateRequest(rs *storage.RequestStatus, origReq *retrieveRequestMsgData) (req *retrieveRequestMsgData) {
log.Trace(fmt.Sprintf("Depo.strategyUpdateRequest: key %v", origReq.Key.Log()))
// we do not create an alternative one
req = origReq
if rs != nil {
self.addRequester(rs, req)
req.setTimeout(self.searchTimeout(rs, req))
}
return
}
// decides the timeout promise sent with the immediate peers response to a retrieve request
// if timeout is explicitly set and expired
func (self *Depo) searchTimeout(rs *storage.RequestStatus, req *retrieveRequestMsgData) (timeout *time.Time) {
reqt := req.getTimeout()
t := time.Now().Add(searchTimeout)
if reqt != nil && reqt.Before(t) {
return reqt
} else {
return &t
}
}
/*
adds a new peer to an existing open request
only add if less than requesterCount peers forwarded the same request id so far
note this is done irrespective of status (searching or found)
*/
func (self *Depo) addRequester(rs *storage.RequestStatus, req *retrieveRequestMsgData) {
log.Trace(fmt.Sprintf("Depo.addRequester: key %v - add peer to req.Id %v", req.Key.Log(), req.Id))
list := rs.Requesters[req.Id]
rs.Requesters[req.Id] = append(list, req)
}

@ -0,0 +1,210 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"fmt"
"sync"
"github.com/ethereum/go-ethereum/swarm/pot"
)
// discovery bzz extension for requesting and relaying node address records
// discPeer wraps BzzPeer and embeds an Overlay connectivity driver
type discPeer struct {
*BzzPeer
overlay Overlay
sentPeers bool // whether we already sent peer closer to this address
mtx sync.RWMutex
peers map[string]bool // tracks node records sent to the peer
depth uint8 // the proximity order advertised by remote as depth of saturation
}
// newDiscovery constructs a discovery peer
func newDiscovery(p *BzzPeer, o Overlay) *discPeer {
d := &discPeer{
overlay: o,
BzzPeer: p,
peers: make(map[string]bool),
}
// record remote as seen so we never send a peer its own record
d.seen(d)
return d
}
// HandleMsg is the message handler that delegates incoming messages
func (d *discPeer) HandleMsg(msg interface{}) error {
switch msg := msg.(type) {
case *peersMsg:
return d.handlePeersMsg(msg)
case *subPeersMsg:
return d.handleSubPeersMsg(msg)
default:
return fmt.Errorf("unknown message type: %T", msg)
}
}
// NotifyDepth sends a message to all connections if depth of saturation is changed
func NotifyDepth(depth uint8, h Overlay) {
f := func(val OverlayConn, po int, _ bool) bool {
dp, ok := val.(*discPeer)
if ok {
dp.NotifyDepth(depth)
}
return true
}
h.EachConn(nil, 255, f)
}
// NotifyPeer informs all peers about a newly added node
func NotifyPeer(p OverlayAddr, k Overlay) {
f := func(val OverlayConn, po int, _ bool) bool {
dp, ok := val.(*discPeer)
if ok {
dp.NotifyPeer(p, uint8(po))
}
return true
}
k.EachConn(p.Address(), 255, f)
}
// NotifyPeer notifies the remote node (recipient) about a peer if
// the peer's PO is within the recipient's advertised depth
// OR the peer is closer to the recipient than self
// unless already notified during the connection session
func (d *discPeer) NotifyPeer(a OverlayAddr, po uint8) {
// immediately return
if (po < d.getDepth() && pot.ProxCmp(d.localAddr, d, a) != 1) || d.seen(a) {
return
}
// log.Trace(fmt.Sprintf("%08x peer %08x notified of peer %08x", d.localAddr.Over()[:4], d.Address()[:4], a.Address()[:4]))
resp := &peersMsg{
Peers: []*BzzAddr{ToAddr(a)},
}
go d.Send(resp)
}
// NotifyDepth sends a subPeers Msg to the receiver notifying them about
// a change in the depth of saturation
func (d *discPeer) NotifyDepth(po uint8) {
// log.Trace(fmt.Sprintf("%08x peer %08x notified of new depth %v", d.localAddr.Over()[:4], d.Address()[:4], po))
go d.Send(&subPeersMsg{Depth: po})
}
/*
peersMsg is the message to pass peer information
It is always a response to a peersRequestMsg
The encoding of a peer address is identical to the devp2p base protocol peers
messages: [IP, Port, NodeID],
Note that a node's FileStore address is not the NodeID but the hash of the NodeID.
TODO:
To mitigate against spurious peers messages, requests should be remembered
and correctness of responses should be checked
If the proxBin of peers in the response is incorrect the sender should be
disconnected
*/
// peersMsg encapsulates an array of peer addresses
// used for communicating about known peers
// relevant for bootstrapping connectivity and updating peersets
type peersMsg struct {
Peers []*BzzAddr
}
// String pretty prints a peersMsg
func (msg peersMsg) String() string {
return fmt.Sprintf("%T: %v", msg, msg.Peers)
}
// handlePeersMsg called by the protocol when receiving peerset (for target address)
// list of nodes ([]PeerAddr in peersMsg) is added to the overlay db using the
// Register interface method
func (d *discPeer) handlePeersMsg(msg *peersMsg) error {
// register all addresses
if len(msg.Peers) == 0 {
return nil
}
for _, a := range msg.Peers {
d.seen(a)
NotifyPeer(a, d.overlay)
}
return d.overlay.Register(toOverlayAddrs(msg.Peers...))
}
// subPeers msg is communicating the depth/sharpness/focus of the overlay table of a peer
type subPeersMsg struct {
Depth uint8
}
// String returns the pretty printer
func (msg subPeersMsg) String() string {
return fmt.Sprintf("%T: request peers > PO%02d. ", msg, msg.Depth)
}
func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error {
if !d.sentPeers {
d.setDepth(msg.Depth)
var peers []*BzzAddr
d.overlay.EachConn(d.Over(), 255, func(p OverlayConn, po int, isproxbin bool) bool {
if pob, _ := pof(d, d.localAddr, 0); pob > po {
return false
}
if !d.seen(p) {
peers = append(peers, ToAddr(p.Off()))
}
return true
})
if len(peers) > 0 {
// log.Debug(fmt.Sprintf("%08x: %v peers sent to %v", d.overlay.BaseAddr(), len(peers), d))
go d.Send(&peersMsg{Peers: peers})
}
}
d.sentPeers = true
return nil
}
// seen takes an Overlay peer and checks if it was sent to a peer already
// if not, marks the peer as sent
func (d *discPeer) seen(p OverlayPeer) bool {
d.mtx.Lock()
defer d.mtx.Unlock()
k := string(p.Address())
if d.peers[k] {
return true
}
d.peers[k] = true
return false
}
func (d *discPeer) getDepth() uint8 {
d.mtx.RLock()
defer d.mtx.RUnlock()
return d.depth
}
func (d *discPeer) setDepth(depth uint8) {
d.mtx.Lock()
defer d.mtx.Unlock()
d.depth = depth
}

@ -0,0 +1,57 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"testing"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
)
/*
 * after connect, test that an outgoing subPeersMsg is sent
 */
func TestDiscovery(t *testing.T) {
params := NewHiveParams()
s, pp := newHiveTester(t, params, 1, nil)
id := s.IDs[0]
raddr := NewAddrFromNodeID(id)
pp.Register([]OverlayAddr{OverlayAddr(raddr)})
// start the hive and wait for the connection
pp.Start(s.Server)
defer pp.Stop()
// send subPeersMsg to the peer
err := s.TestExchanges(p2ptest.Exchange{
Label: "outgoing subPeersMsg",
Expects: []p2ptest.Expect{
{
Code: 1,
Msg: &subPeersMsg{Depth: 0},
Peer: id,
},
},
})
if err != nil {
t.Fatal(err)
}
}

@ -1,150 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"fmt"
"math/rand"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/storage"
)
const requesterCount = 3
/*
forwarder implements the CloudStore interface (use by storage.NetStore)
and serves as the cloud store backend orchestrating storage/retrieval/delivery
via the native bzz protocol
which uses an MSB logarithmic distance-based semi-permanent Kademlia table for
* recursive forwarding style routing for retrieval
* smart syncronisation
*/
type forwarder struct {
hive *Hive
}
func NewForwarder(hive *Hive) *forwarder {
return &forwarder{hive: hive}
}
// generate a unique id uint64
func generateId() uint64 {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return uint64(r.Int63())
}
var searchTimeout = 3 * time.Second
// forwarding logic
// logic propagating retrieve requests to peers given by the kademlia hive
func (self *forwarder) Retrieve(chunk *storage.Chunk) {
peers := self.hive.getPeers(chunk.Key, 0)
log.Trace(fmt.Sprintf("forwarder.Retrieve: %v - received %d peers from KΛÐΞMLIΛ...", chunk.Key.Log(), len(peers)))
OUT:
for _, p := range peers {
log.Trace(fmt.Sprintf("forwarder.Retrieve: sending retrieveRequest %v to peer [%v]", chunk.Key.Log(), p))
for _, recipients := range chunk.Req.Requesters {
for _, recipient := range recipients {
req := recipient.(*retrieveRequestMsgData)
if req.from.Addr() == p.Addr() {
continue OUT
}
}
}
req := &retrieveRequestMsgData{
Key: chunk.Key,
Id: generateId(),
}
var err error
if p.swap != nil {
err = p.swap.Add(-1)
}
if err == nil {
p.retrieve(req)
break OUT
}
log.Warn(fmt.Sprintf("forwarder.Retrieve: unable to send retrieveRequest to peer [%v]: %v", chunk.Key.Log(), err))
}
}
// requests to specific peers given by the kademlia hive
// except for peers that the store request came from (if any)
// delivery queueing taken care of by syncer
func (self *forwarder) Store(chunk *storage.Chunk) {
var n int
msg := &storeRequestMsgData{
Key: chunk.Key,
SData: chunk.SData,
}
var source *peer
if chunk.Source != nil {
source = chunk.Source.(*peer)
}
for _, p := range self.hive.getPeers(chunk.Key, 0) {
log.Trace(fmt.Sprintf("forwarder.Store: %v %v", p, chunk))
if p.syncer != nil && (source == nil || p.Addr() != source.Addr()) {
n++
Deliver(p, msg, PropagateReq)
}
}
log.Trace(fmt.Sprintf("forwarder.Store: sent to %v peers (chunk = %v)", n, chunk))
}
// once a chunk is found deliver it to its requesters unless timed out
func (self *forwarder) Deliver(chunk *storage.Chunk) {
// iterate over request entries
for id, requesters := range chunk.Req.Requesters {
counter := requesterCount
msg := &storeRequestMsgData{
Key: chunk.Key,
SData: chunk.SData,
}
var n int
var req *retrieveRequestMsgData
// iterate over requesters with the same id
for id, r := range requesters {
req = r.(*retrieveRequestMsgData)
if req.timeout == nil || req.timeout.After(time.Now()) {
log.Trace(fmt.Sprintf("forwarder.Deliver: %v -> %v", req.Id, req.from))
msg.Id = uint64(id)
Deliver(req.from, msg, DeliverReq)
n++
counter--
if counter <= 0 {
break
}
}
}
log.Trace(fmt.Sprintf("forwarder.Deliver: submit chunk %v (request id %v) for delivery to %v peers", chunk.Key.Log(), id, n))
}
}
// initiate delivery of a chunk to a particular peer via syncer#addRequest
// depending on syncer mode and priority settings and sync request type
// this either goes via confirmation roundtrip or queued or pushed directly
func Deliver(p *peer, req interface{}, ty int) {
p.syncer.addRequest(req, ty)
}
// push chunk over to peer
func Push(p *peer, key storage.Key, priority uint) {
p.syncer.doDelivery(key, priority, p.syncer.quit)
}

@ -18,386 +18,244 @@ package network
 import (
 	"fmt"
-	"math/rand"
-	"path/filepath"
+	"sync"
 	"time"

-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/metrics"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/p2p"
 	"github.com/ethereum/go-ethereum/p2p/discover"
-	"github.com/ethereum/go-ethereum/p2p/netutil"
-	"github.com/ethereum/go-ethereum/swarm/network/kademlia"
-	"github.com/ethereum/go-ethereum/swarm/storage"
+	"github.com/ethereum/go-ethereum/swarm/log"
+	"github.com/ethereum/go-ethereum/swarm/state"
 )

-// Hive is the logistic manager of the swarm
-// it uses a generic kademlia nodetable to find best peer list
-// for any target
-// this is used by the netstore to search for content in the swarm
-// the bzz protocol peersMsgData exchange is relayed to Kademlia
-// for db storage and filtering
-// connections and disconnections are reported and relayed
-// to keep the nodetable uptodate
+/*
+Hive is the logistic manager of the swarm
+
+When the hive is started, a forever loop is launched that
+asks the Overlay Topology driver (e.g., generic kademlia nodetable)
+to suggest peers to bootstrap connectivity
+*/

-var (
-	peersNumGauge     = metrics.NewRegisteredGauge("network.peers.num", nil)
-	addPeerCounter    = metrics.NewRegisteredCounter("network.addpeer.count", nil)
-	removePeerCounter = metrics.NewRegisteredCounter("network.removepeer.count", nil)
-)
+// Overlay is the interface for kademlia (or other topology drivers)
+type Overlay interface {
+	// suggest peers to connect to
+	SuggestPeer() (OverlayAddr, int, bool)
+	// register and deregister peer connections
+	On(OverlayConn) (depth uint8, changed bool)
+	Off(OverlayConn)
+	// register peer addresses
+	Register([]OverlayAddr) error
+	// iterate over connected peers
+	EachConn([]byte, int, func(OverlayConn, int, bool) bool)
+	// iterate over known peers (address records)
+	EachAddr([]byte, int, func(OverlayAddr, int, bool) bool)
+	// pretty print the connectivity
+	String() string
+	// base Overlay address of the node itself
+	BaseAddr() []byte
+	// connectivity health check used for testing
+	Healthy(*PeerPot) *Health
+}

-type Hive struct {
-	listenAddr   func() string
-	callInterval uint64
-	id           discover.NodeID
-	addr         kademlia.Address
-	kad          *kademlia.Kademlia
-	path         string
-	quit         chan bool
-	toggle       chan bool
-	more         chan bool
-
-	// for testing only
-	swapEnabled bool
-	syncEnabled bool
-	blockRead   bool
-	blockWrite  bool
-}
-
-const (
-	callInterval = 3000000000
-	// bucketSize = 3
-	// maxProx = 8
-	// proxBinSize = 4
-)
-
+// HiveParams holds the config options to hive
 type HiveParams struct {
-	CallInterval uint64
-	KadDbPath    string
-	*kademlia.KadParams
+	Discovery             bool          // whether discovery is enabled or not
+	PeersBroadcastSetSize uint8         // how many peers to use when relaying
+	MaxPeersPerRequest    uint8         // max size for peer address batches
+	KeepAliveInterval     time.Duration
 }

-//create default params
-func NewDefaultHiveParams() *HiveParams {
-	kad := kademlia.NewDefaultKadParams()
-	// kad.BucketSize = bucketSize
-	// kad.MaxProx = maxProx
-	// kad.ProxBinSize = proxBinSize
+// NewHiveParams returns hive config with only the defaults set
+func NewHiveParams() *HiveParams {
 	return &HiveParams{
-		CallInterval: callInterval,
-		KadParams:    kad,
+		Discovery:             true,
+		PeersBroadcastSetSize: 3,
+		MaxPeersPerRequest:    5,
+		KeepAliveInterval:     500 * time.Millisecond,
 	}
 }

-//this can only finally be set after all config options (file, cmd line, env vars)
-//have been evaluated
-func (self *HiveParams) Init(path string) {
-	self.KadDbPath = filepath.Join(path, "bzz-peers.json")
+// Hive manages network connections of the swarm node
+type Hive struct {
+	*HiveParams // settings
+	Overlay     // the overlay connectivity driver
+	Store   state.Store           // storage interface to save peers across sessions
+	addPeer func(*discover.Node)  // server callback to connect to a peer
+	// bookkeeping
+	lock   sync.Mutex
+	ticker *time.Ticker
 }

-func NewHive(addr common.Hash, params *HiveParams, swapEnabled, syncEnabled bool) *Hive {
-	kad := kademlia.New(kademlia.Address(addr), params.KadParams)
+// NewHive constructs a new hive
+// HiveParams: config parameters
+// Overlay: connectivity driver using a network topology
+// StateStore: to save peers across sessions
+func NewHive(params *HiveParams, overlay Overlay, store state.Store) *Hive {
 	return &Hive{
-		callInterval: params.CallInterval,
-		kad:          kad,
-		addr:         kad.Addr(),
-		path:         params.KadDbPath,
-		swapEnabled:  swapEnabled,
-		syncEnabled:  syncEnabled,
+		HiveParams: params,
+		Overlay:    overlay,
+		Store:      store,
 	}
 }

-func (self *Hive) SyncEnabled(on bool) {
-	self.syncEnabled = on
-}
-
-func (self *Hive) SwapEnabled(on bool) {
-	self.swapEnabled = on
-}
-
-func (self *Hive) BlockNetworkRead(on bool) {
-	self.blockRead = on
-}
-
-func (self *Hive) BlockNetworkWrite(on bool) {
-	self.blockWrite = on
-}
-
-// public accessor to the hive base address
-func (self *Hive) Addr() kademlia.Address {
-	return self.addr
-}
-
-// Start receives network info only at startup
-// listedAddr is a function to retrieve listening address to advertise to peers
-// connectPeer is a function to connect to a peer based on its NodeID or enode URL
-// there are called on the p2p.Server which runs on the node
-func (self *Hive) Start(id discover.NodeID, listenAddr func() string, connectPeer func(string) error) (err error) {
-	self.toggle = make(chan bool)
-	self.more = make(chan bool)
-	self.quit = make(chan bool)
-	self.id = id
-	self.listenAddr = listenAddr
-	err = self.kad.Load(self.path, nil)
-	if err != nil {
-		log.Warn(fmt.Sprintf("Warning: error reading kaddb '%s' (skipping): %v", self.path, err))
-		err = nil
+// Start starts the hive; it receives the p2p.Server only at startup
+// the server is used to connect to a peer based on its NodeID or enode URL
+// these are called on the p2p.Server which runs on the node
+func (h *Hive) Start(server *p2p.Server) error {
+	log.Info(fmt.Sprintf("%08x hive starting", h.BaseAddr()[:4]))
+	// if state store is specified, load peers to prepopulate the overlay address book
+	if h.Store != nil {
+		log.Info("detected an existing store. trying to load peers")
+		if err := h.loadPeers(); err != nil {
+			log.Error(fmt.Sprintf("%08x hive encountered an error trying to load peers", h.BaseAddr()[:4]))
+			return err
+		}
 	}
+	// assign the p2p.Server#AddPeer function to connect to peers
+	h.addPeer = server.AddPeer
+	// ticker to keep the hive alive
+	h.ticker = time.NewTicker(h.KeepAliveInterval)
 	// this loop is doing bootstrapping and maintains a healthy table
-	go self.keepAlive()
-	go func() {
-		// whenever toggled ask kademlia about most preferred peer
-		for alive := range self.more {
-			if !alive {
-				// receiving false closes the loop while allowing parallel routines
-				// to attempt to write to more (remove Peer when shutting down)
-				return
-			}
-			node, need, proxLimit := self.kad.Suggest()
-			if node != nil && len(node.Url) > 0 {
-				log.Trace(fmt.Sprintf("call known bee %v", node.Url))
-				// enode or any lower level connection address is unnecessary in future
-				// discovery table is used to look it up.
-				connectPeer(node.Url)
-			}
-			if need {
-				// a random peer is taken from the table
-				peers := self.kad.FindClosest(kademlia.RandomAddressAt(self.addr, rand.Intn(self.kad.MaxProx)), 1)
-				if len(peers) > 0 {
-					// a random address at prox bin 0 is sent for lookup
-					randAddr := kademlia.RandomAddressAt(self.addr, proxLimit)
-					req := &retrieveRequestMsgData{
-						Key: storage.Key(randAddr[:]),
-					}
-					log.Trace(fmt.Sprintf("call any bee near %v (PO%03d) - messenger bee: %v", randAddr, proxLimit, peers[0]))
-					peers[0].(*peer).retrieve(req)
-				} else {
-					log.Warn(fmt.Sprintf("no peer"))
-				}
-				log.Trace(fmt.Sprintf("buzz kept alive"))
-			} else {
-				log.Info(fmt.Sprintf("no need for more bees"))
-			}
-			select {
-			case self.toggle <- need:
-			case <-self.quit:
-				return
-			}
-			log.Debug(fmt.Sprintf("queen's address: %v, population: %d (%d)", self.addr, self.kad.Count(), self.kad.DBCount()))
-		}
-	}()
-	return
+	go h.connect()
+	return nil
 }

-// keepAlive is a forever loop
-// in its awake state it periodically triggers connection attempts
-// by writing to self.more until Kademlia Table is saturated
-// wake state is toggled by writing to self.toggle
-// it restarts if the table becomes non-full again due to disconnections
-func (self *Hive) keepAlive() {
-	alarm := time.NewTicker(time.Duration(self.callInterval)).C
-	for {
-		peersNumGauge.Update(int64(self.kad.Count()))
-		select {
-		case <-alarm:
-			if self.kad.DBCount() > 0 {
-				select {
-				case self.more <- true:
-					log.Debug(fmt.Sprintf("buzz wakeup"))
-				default:
-				}
-			}
-		case need := <-self.toggle:
-			if alarm == nil && need {
-				alarm = time.NewTicker(time.Duration(self.callInterval)).C
-			}
-			if alarm != nil && !need {
+// Stop terminates the update loop and saves the peers
+func (h *Hive) Stop() error {
+	log.Info(fmt.Sprintf("%08x hive stopping, saving peers", h.BaseAddr()[:4]))
+	h.ticker.Stop()
+	if h.Store != nil {
+		if err := h.savePeers(); err != nil {
+			return fmt.Errorf("could not save peers to persistence store: %v", err)
+		}
alarm = nil
}
case <-self.quit:
return
} }
} if err := h.Store.Close(); err != nil {
} return fmt.Errorf("could not close file handle to persistence store: %v", err)
func (self *Hive) Stop() error {
// closing toggle channel quits the updateloop
close(self.quit)
return self.kad.Save(self.path, saveSync)
}
// called at the end of a successful protocol handshake
func (self *Hive) addPeer(p *peer) error {
addPeerCounter.Inc(1)
defer func() {
select {
case self.more <- true:
default:
} }
}()
log.Trace(fmt.Sprintf("hi new bee %v", p))
err := self.kad.On(p, loadSync)
if err != nil {
return err
} }
// self lookup (can be encoded as nil/zero key since peers addr known) + no id () log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
// the most common way of saying hi in bzz is initiation of gossip h.EachConn(nil, 255, func(p OverlayConn, _ int, _ bool) bool {
// let me know about anyone new from my hood , here is the storageradius log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
// to send the 6 byte self lookup p.Drop(nil)
// we do not record as request or forward it, just reply with peers return true
p.retrieve(&retrieveRequestMsgData{}) })
log.Trace(fmt.Sprintf("'whatsup wheresdaparty' sent to %v", p))
log.Info(fmt.Sprintf("%08x all peers dropped", h.BaseAddr()[:4]))
return nil return nil
} }
// called after peer disconnected // connect is a forever loop
func (self *Hive) removePeer(p *peer) { // at each iteration, ask the overlay driver to suggest the most preferred peer to connect to
removePeerCounter.Inc(1) // as well as advertises saturation depth if needed
log.Debug(fmt.Sprintf("bee %v removed", p)) func (h *Hive) connect() {
self.kad.Off(p, saveSync) for range h.ticker.C {
select {
case self.more <- true:
default:
}
if self.kad.Count() == 0 {
log.Debug(fmt.Sprintf("empty, all bees gone"))
}
}
// Retrieve a list of live peers that are closer to target than us addr, depth, changed := h.SuggestPeer()
func (self *Hive) getPeers(target storage.Key, max int) (peers []*peer) { if h.Discovery && changed {
var addr kademlia.Address NotifyDepth(uint8(depth), h)
copy(addr[:], target[:]) }
for _, node := range self.kad.FindClosest(addr, max) { if addr == nil {
peers = append(peers, node.(*peer)) continue
} }
return
}
// disconnects all the peers
func (self *Hive) DropAll() {
log.Info(fmt.Sprintf("dropping all bees"))
for _, node := range self.kad.FindClosest(kademlia.Address{}, 0) {
node.Drop()
}
}
// contructor for kademlia.NodeRecord based on peer address alone
// TODO: should go away and only addr passed to kademlia
func newNodeRecord(addr *peerAddr) *kademlia.NodeRecord {
now := time.Now()
return &kademlia.NodeRecord{
Addr: addr.Addr,
Url: addr.String(),
Seen: now,
After: now,
}
}
// called by the protocol when receiving peerset (for target address) log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4]))
// peersMsgData is converted to a slice of NodeRecords for Kademlia under, err := discover.ParseNode(string(addr.(Addr).Under()))
// this is to store all thats needed if err != nil {
func (self *Hive) HandlePeersMsg(req *peersMsgData, from *peer) { log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
var nrs []*kademlia.NodeRecord
for _, p := range req.Peers {
if err := netutil.CheckRelayIP(from.remoteAddr.IP, p.IP); err != nil {
log.Trace(fmt.Sprintf("invalid peer IP %v from %v: %v", from.remoteAddr.IP, p.IP, err))
continue continue
} }
nrs = append(nrs, newNodeRecord(p)) log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
h.addPeer(under)
} }
self.kad.Add(nrs)
} }
// peer wraps the protocol instance to represent a connected peer // Run protocol run function
// it implements kademlia.Node interface func (h *Hive) Run(p *BzzPeer) error {
type peer struct { dp := newDiscovery(p, h)
*bzz // protocol instance running on peer connection depth, changed := h.On(dp)
} // if we want discovery, advertise change of depth
if h.Discovery {
// protocol instance implements kademlia.Node interface (embedded peer) if changed {
func (self *peer) Addr() kademlia.Address { // if depth changed, send to all peers
return self.remoteAddr.Addr NotifyDepth(depth, h)
} else {
// otherwise just send depth to new peer
dp.NotifyDepth(depth)
}
}
NotifyPeer(p.Off(), h)
defer h.Off(dp)
return dp.Run(dp.HandleMsg)
} }
func (self *peer) Url() string { // NodeInfo function is used by the p2p.server RPC interface to display
return self.remoteAddr.String() // protocol specific node information
func (h *Hive) NodeInfo() interface{} {
return h.String()
} }
// TODO take into account traffic // PeerInfo function is used by the p2p.server RPC interface to display
func (self *peer) LastActive() time.Time { // protocol specific information any connected peer referred to by their NodeID
return self.lastActive func (h *Hive) PeerInfo(id discover.NodeID) interface{} {
addr := NewAddrFromNodeID(id)
return struct {
OAddr hexutil.Bytes
UAddr hexutil.Bytes
}{
OAddr: addr.OAddr,
UAddr: addr.UAddr,
}
} }
// reads the serialised form of sync state persisted as the 'Meta' attribute // ToAddr returns the serialisable version of u
// and sets the decoded syncState on the online node func ToAddr(pa OverlayPeer) *BzzAddr {
func loadSync(record *kademlia.NodeRecord, node kademlia.Node) error { if addr, ok := pa.(*BzzAddr); ok {
p, ok := node.(*peer) return addr
if !ok {
return fmt.Errorf("invalid type")
} }
if record.Meta == nil { if p, ok := pa.(*discPeer); ok {
log.Debug(fmt.Sprintf("no sync state for node record %v setting default", record)) return p.BzzAddr
p.syncState = &syncState{DbSyncState: &storage.DbSyncState{}}
return nil
} }
state, err := decodeSync(record.Meta) return pa.(*BzzPeer).BzzAddr
if err != nil {
return fmt.Errorf("error decoding kddb record meta info into a sync state: %v", err)
}
log.Trace(fmt.Sprintf("sync state for node record %v read from Meta: %s", record, string(*(record.Meta))))
p.syncState = state
return err
} }
// callback when saving a sync state // loadPeers, savePeer implement persistence callback/
func saveSync(record *kademlia.NodeRecord, node kademlia.Node) { func (h *Hive) loadPeers() error {
if p, ok := node.(*peer); ok { var as []*BzzAddr
meta, err := encodeSync(p.syncState) err := h.Store.Get("peers", &as)
if err != nil { if err != nil {
log.Warn(fmt.Sprintf("error saving sync state for %v: %v", node, err)) if err == state.ErrNotFound {
return log.Info(fmt.Sprintf("hive %08x: no persisted peers found", h.BaseAddr()[:4]))
return nil
} }
log.Trace(fmt.Sprintf("saved sync state for %v: %s", node, string(*meta))) return err
record.Meta = meta
} }
} log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4]))
// the immediate response to a retrieve request, return h.Register(toOverlayAddrs(as...))
// sends relevant peer data given by the kademlia hive to the requester }
// TODO: remember peers sent for duration of the session, only new peers sent
func (self *Hive) peers(req *retrieveRequestMsgData) {
if req != nil {
var addrs []*peerAddr
if req.timeout == nil || time.Now().Before(*(req.timeout)) {
key := req.Key
// self lookup from remote peer
if storage.IsZeroKey(key) {
addr := req.from.Addr()
key = storage.Key(addr[:])
req.Key = nil
}
// get peer addresses from hive
for _, peer := range self.getPeers(key, int(req.MaxPeers)) {
addrs = append(addrs, peer.remoteAddr)
}
log.Debug(fmt.Sprintf("Hive sending %d peer addresses to %v. req.Id: %v, req.Key: %v", len(addrs), req.from, req.Id, req.Key.Log()))
peersData := &peersMsgData{ // toOverlayAddrs transforms an array of BzzAddr to OverlayAddr
Peers: addrs, func toOverlayAddrs(as ...*BzzAddr) (oas []OverlayAddr) {
Key: req.Key, for _, a := range as {
Id: req.Id, oas = append(oas, OverlayAddr(a))
}
peersData.setTimeout(req.timeout)
req.from.peers(peersData)
}
} }
return
} }
func (self *Hive) String() string { // savePeers, savePeer implement persistence callback/
return self.kad.String() func (h *Hive) savePeers() error {
var peers []*BzzAddr
h.Overlay.EachAddr(nil, 256, func(pa OverlayAddr, i int, _ bool) bool {
if pa == nil {
log.Warn(fmt.Sprintf("empty addr: %v", i))
return true
}
apa := ToAddr(pa)
log.Trace("saving peer", "peer", apa)
peers = append(peers, apa)
return true
})
if err := h.Store.Put("peers", peers); err != nil {
return fmt.Errorf("could not save peers: %v", err)
}
return nil
} }
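For orientation, here is a minimal, hypothetical wiring sketch for the rewritten hive, using only constructors that appear in this changeset (NewHiveParams, NewKadParams, NewKademlia, NewHive, state.NewDBStore); the surrounding p2p.Server setup (srv) and the store path are assumptions for illustration, not part of the diff.

	// Sketch only: srv is an assumed, already-configured *p2p.Server.
	params := NewHiveParams()
	addr := RandomAddr()                           // this node's overlay address
	kad := NewKademlia(addr.OAddr, NewKadParams()) // kademlia as the Overlay driver
	store, err := state.NewDBStore("bzz-peers")    // hypothetical path; persists peers across sessions
	if err != nil {
		panic(err)
	}
	hive := NewHive(params, kad, store)
	if err := hive.Start(srv); err != nil {
		panic(err)
	}
	defer hive.Stop()

Note the design change: the hive no longer persists a kaddb file itself; peer records survive restarts only through the injected state.Store, as exercised by TestHiveStatePersistance below.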

@@ -0,0 +1,108 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"io/ioutil"
"log"
"os"
"testing"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/swarm/state"
)
func newHiveTester(t *testing.T, params *HiveParams, n int, store state.Store) (*bzzTester, *Hive) {
// setup
addr := RandomAddr() // the tested peer's address
to := NewKademlia(addr.OAddr, NewKadParams())
pp := NewHive(params, to, store) // hive
return newBzzBaseTester(t, n, addr, DiscoverySpec, pp.Run), pp
}
func TestRegisterAndConnect(t *testing.T) {
params := NewHiveParams()
s, pp := newHiveTester(t, params, 1, nil)
id := s.IDs[0]
raddr := NewAddrFromNodeID(id)
pp.Register([]OverlayAddr{OverlayAddr(raddr)})
// start the hive and wait for the connection
err := pp.Start(s.Server)
if err != nil {
t.Fatal(err)
}
defer pp.Stop()
// retrieve and broadcast
err = s.TestDisconnected(&p2ptest.Disconnect{
Peer: s.IDs[0],
Error: nil,
})
if err == nil || err.Error() != "timed out waiting for peers to disconnect" {
t.Fatalf("expected peer to connect")
}
}
func TestHiveStatePersistance(t *testing.T) {
log.SetOutput(os.Stdout)
dir, err := ioutil.TempDir("", "hive_test_store")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
store, err := state.NewDBStore(dir) //start the hive with an empty dbstore
params := NewHiveParams()
s, pp := newHiveTester(t, params, 5, store)
peers := make(map[string]bool)
for _, id := range s.IDs {
raddr := NewAddrFromNodeID(id)
pp.Register([]OverlayAddr{OverlayAddr(raddr)})
peers[raddr.String()] = true
}
// start the hive and wait for the connection
err = pp.Start(s.Server)
if err != nil {
t.Fatal(err)
}
pp.Stop()
store.Close()
persistedStore, err := state.NewDBStore(dir) // restart the hive with the persisted dbstore
s1, pp := newHiveTester(t, params, 1, persistedStore)
//start the hive and wait for the connection
pp.Start(s1.Server)
i := 0
pp.Overlay.EachAddr(nil, 256, func(addr OverlayAddr, po int, nn bool) bool {
delete(peers, addr.(*BzzAddr).String())
i++
return true
})
if len(peers) != 0 || i != 5 {
t.Fatalf("invalid peers loaded")
}
}

@@ -0,0 +1,765 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"bytes"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/pot"
)
/*
Taking the proximity order relative to a fixed point x classifies the points in
the space (n byte long byte sequences) into bins. Items in each bin are at
most half as distant from x as items in the previous bin. Given a sample of
uniformly distributed items (a hash function over arbitrary sequence) the
proximity scale maps onto a series of subsets with cardinalities on a negative
exponential scale.

It also has the property that any two items belonging to the same bin are at
most half as distant from each other as they are from x.

If we think of a random sample of items in the bins as connections in a network of
interconnected nodes then relative proximity can serve as the basis for local
decisions for graph traversal where the task is to find a route between two
points. Since in every hop the finite distance halves, there is
a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/
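// An illustrative sketch (not part of the original source) of the proximity
// order described above, for fixed-length keys: it counts the common leading
// bits of two addresses, which is what pot.DefaultPof(256) computes for
// 256-bit keys.
//
//	func proximityOrder(a, b []byte) int {
//		for i := range a {
//			if x := a[i] ^ b[i]; x != 0 {
//			    return i*8 + bits.LeadingZeros8(x) // needs "math/bits"
//			}
//		}
//		return len(a) * 8 // identical 32-byte addresses: PO 256
//	}
//
// For example, addresses starting 0x80... and 0x00... differ in the first bit,
// so their proximity order is 0 (farthest); equal addresses give 256 (self).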
var pof = pot.DefaultPof(256)
// KadParams holds the config params for Kademlia
type KadParams struct {
// adjustable parameters
MaxProxDisplay int // number of rows the table shows
MinProxBinSize int // nearest neighbour core minimum cardinality
MinBinSize int // minimum number of peers in a row
MaxBinSize int // maximum number of peers in a row before pruning
RetryInterval int64 // initial interval before a peer is first redialed
RetryExponent int // exponent to multiply retry intervals with
MaxRetries int // maximum number of redial attempts
// function to sanction or prevent suggesting a peer
Reachable func(OverlayAddr) bool
}
// NewKadParams returns a params struct with default values
func NewKadParams() *KadParams {
return &KadParams{
MaxProxDisplay: 16,
MinProxBinSize: 2,
MinBinSize: 2,
MaxBinSize: 4,
RetryInterval: 4200000000, // 4.2 sec
MaxRetries: 42,
RetryExponent: 2,
}
}
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
lock sync.RWMutex
*KadParams // Kademlia configuration parameters
base []byte // immutable baseaddress of the table
addrs *pot.Pot // pots container for known peer addresses
conns *pot.Pot // pots container for live peer connections
depth uint8 // stores the last current depth of saturation
nDepth int // stores the last neighbourhood depth
nDepthC chan int // returned by DepthC function to signal neighbourhood depth change
addrCountC chan int // returned by AddrCountC function to signal peer count change
}
// NewKademlia creates a Kademlia table for base address addr
// with parameters as in params
// if params is nil, it uses default values
func NewKademlia(addr []byte, params *KadParams) *Kademlia {
if params == nil {
params = NewKadParams()
}
return &Kademlia{
base: addr,
KadParams: params,
addrs: pot.NewPot(nil, 0),
conns: pot.NewPot(nil, 0),
}
}
// OverlayPeer interface captures the common aspect of view of a peer from the Overlay
// topology driver
type OverlayPeer interface {
Address() []byte
}
// OverlayConn represents a connected peer
type OverlayConn interface {
OverlayPeer
Drop(error) // call to indicate a peer should be expunged
Off() OverlayAddr // call to return a persistent OverlayAddr
}
// OverlayAddr represents a kademlia peer record
type OverlayAddr interface {
OverlayPeer
Update(OverlayAddr) OverlayAddr // returns the updated version of the original
}
// entry represents a Kademlia table entry (an extension of OverlayPeer)
type entry struct {
OverlayPeer
seenAt time.Time
retries int
}
// newEntry creates a kademlia peer from an OverlayPeer interface
func newEntry(p OverlayPeer) *entry {
return &entry{
OverlayPeer: p,
seenAt: time.Now(),
}
}
// Bin is the binary (bitvector) serialisation of the entry address
func (e *entry) Bin() string {
return pot.ToBin(e.addr().Address())
}
// Label is a short tag for the entry for debug
func Label(e *entry) string {
return fmt.Sprintf("%s (%d)", e.Hex()[:4], e.retries)
}
// Hex is the hexadecimal serialisation of the entry address
func (e *entry) Hex() string {
return fmt.Sprintf("%x", e.addr().Address())
}
// String is the short tag for the entry
func (e *entry) String() string {
return fmt.Sprintf("%s (%d)", e.Hex()[:8], e.retries)
}
// addr returns the kad peer record (OverlayAddr) corresponding to the entry
func (e *entry) addr() OverlayAddr {
a, _ := e.OverlayPeer.(OverlayAddr)
return a
}
// conn returns the connected peer (OverlayPeer) corresponding to the entry
func (e *entry) conn() OverlayConn {
c, _ := e.OverlayPeer.(OverlayConn)
return c
}
// Register enters each OverlayAddr as kademlia peer record into the
// database of known peer addresses
func (k *Kademlia) Register(peers []OverlayAddr) error {
k.lock.Lock()
defer k.lock.Unlock()
var known, size int
for _, p := range peers {
// error if self received, peer should know better
// and should be punished for this
if bytes.Equal(p.Address(), k.base) {
return fmt.Errorf("add peers: %x is self", k.base)
}
var found bool
k.addrs, _, found, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
// if not found
if v == nil {
// insert new offline peer into conns
return newEntry(p)
}
// found among known peers, do nothing
return v
})
if found {
known++
}
size++
}
// send new address count value only if there are new addresses
if k.addrCountC != nil && size-known > 0 {
k.addrCountC <- k.addrs.Size()
}
// log.Trace(fmt.Sprintf("%x registered %v peers, %v known, total: %v", k.BaseAddr()[:4], size, known, k.addrs.Size()))
k.sendNeighbourhoodDepthChange()
return nil
}
// SuggestPeer returns a known peer for the lowest proximity bin with the
// lowest bin count below depth;
// naturally, if there is an empty row it returns a peer for that bin
func (k *Kademlia) SuggestPeer() (a OverlayAddr, o int, want bool) {
k.lock.Lock()
defer k.lock.Unlock()
minsize := k.MinBinSize
depth := k.neighbourhoodDepth()
// if there is a callable neighbour within the current proxBin, connect
// this makes sure nearest neighbour set is fully connected
var ppo int
k.addrs.EachNeighbour(k.base, pof, func(val pot.Val, po int) bool {
if po < depth {
return false
}
a = k.callable(val)
ppo = po
return a == nil
})
if a != nil {
log.Trace(fmt.Sprintf("%08x candidate nearest neighbour found: %v (%v)", k.BaseAddr()[:4], a, ppo))
return a, 0, false
}
// log.Trace(fmt.Sprintf("%08x no candidate nearest neighbours to connect to (Depth: %v, minProxSize: %v) %#v", k.BaseAddr()[:4], depth, k.MinProxBinSize, a))
var bpo []int
prev := -1
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
prev++
for ; prev < po; prev++ {
bpo = append(bpo, prev)
minsize = 0
}
if size < minsize {
bpo = append(bpo, po)
minsize = size
}
return size > 0 && po < depth
})
// all buckets are full, ie., minsize == k.MinBinSize
if len(bpo) == 0 {
// log.Debug(fmt.Sprintf("%08x: all bins saturated", k.BaseAddr()[:4]))
return nil, 0, false
}
// as long as we got candidate peers to connect to
// dont ask for new peers (want = false)
// try to select a candidate peer
// find the first callable peer
nxt := bpo[0]
k.addrs.EachBin(k.base, pof, nxt, func(po, _ int, f func(func(pot.Val, int) bool) bool) bool {
// for each bin (up until depth) we find callable candidate peers
if po >= depth {
return false
}
return f(func(val pot.Val, _ int) bool {
a = k.callable(val)
return a == nil
})
})
// found a candidate
if a != nil {
return a, 0, false
}
// no candidate peer found, request for the short bin
var changed bool
if uint8(nxt) < k.depth {
k.depth = uint8(nxt)
changed = true
}
return a, nxt, changed
}
// On inserts the peer as a kademlia peer into the live peers
func (k *Kademlia) On(p OverlayConn) (uint8, bool) {
k.lock.Lock()
defer k.lock.Unlock()
e := newEntry(p)
var ins bool
k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(v pot.Val) pot.Val {
// if not found live
if v == nil {
ins = true
// insert new online peer into conns
return e
}
// found among live peers, do nothing
return v
})
if ins {
// insert new online peer into addrs
k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
return e
})
// send new address count value only if the peer is inserted
if k.addrCountC != nil {
k.addrCountC <- k.addrs.Size()
}
}
log.Trace(k.string())
// calculate if depth of saturation changed
depth := uint8(k.saturation(k.MinBinSize))
var changed bool
if depth != k.depth {
changed = true
k.depth = depth
}
k.sendNeighbourhoodDepthChange()
return k.depth, changed
}
// NeighbourhoodDepthC returns the channel that sends a new kademlia
// neighbourhood depth on each change.
// Not receiving from the returned channel will block On function
// when the neighbourhood depth is changed.
func (k *Kademlia) NeighbourhoodDepthC() <-chan int {
if k.nDepthC == nil {
k.nDepthC = make(chan int)
}
return k.nDepthC
}
// sendNeighbourhoodDepthChange sends new neighbourhood depth to k.nDepth channel
// if it is initialized.
func (k *Kademlia) sendNeighbourhoodDepthChange() {
// nDepthC is initialized when NeighbourhoodDepthC is called and returned by it.
// It provides signaling of neighbourhood depth change.
// This part of the code is sending new neighbourhood depth to nDepthC if that condition is met.
if k.nDepthC != nil {
nDepth := k.neighbourhoodDepth()
if nDepth != k.nDepth {
k.nDepth = nDepth
k.nDepthC <- nDepth
}
}
}
// AddrCountC returns the channel that sends a new
// address count value on each change.
// Not receiving from the returned channel will block Register function
// when address count value changes.
func (k *Kademlia) AddrCountC() <-chan int {
if k.addrCountC == nil {
k.addrCountC = make(chan int)
}
return k.addrCountC
}
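// Illustrative sketch (not part of the original source): both channels above
// are unbuffered, so once requested they must be drained continuously or the
// next depth/count change will block On, Off or Register.
//
//	go func() {
//		for depth := range k.NeighbourhoodDepthC() {
//			fmt.Printf("neighbourhood depth is now %d\n", depth)
//		}
//	}()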
// Off removes a peer from among live peers
func (k *Kademlia) Off(p OverlayConn) {
k.lock.Lock()
defer k.lock.Unlock()
var del bool
k.addrs, _, _, _ = pot.Swap(k.addrs, p, pof, func(v pot.Val) pot.Val {
// v cannot be nil, must check otherwise we overwrite entry
if v == nil {
panic(fmt.Sprintf("connected peer not found %v", p))
}
del = true
return newEntry(p.Off())
})
if del {
k.conns, _, _, _ = pot.Swap(k.conns, p, pof, func(_ pot.Val) pot.Val {
// v cannot be nil, but no need to check
return nil
})
// send new address count value only if the peer is deleted
if k.addrCountC != nil {
k.addrCountC <- k.addrs.Size()
}
k.sendNeighbourhoodDepthChange()
}
}
func (k *Kademlia) EachBin(base []byte, pof pot.Pof, o int, eachBinFunc func(conn OverlayConn, po int) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
var startPo int
var endPo int
kadDepth := k.neighbourhoodDepth()
k.conns.EachBin(base, pof, o, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
if startPo > 0 && endPo != k.MaxProxDisplay {
startPo = endPo + 1
}
if po < kadDepth {
endPo = po
} else {
endPo = k.MaxProxDisplay
}
for bin := startPo; bin <= endPo; bin++ {
f(func(val pot.Val, _ int) bool {
return eachBinFunc(val.(*entry).conn(), bin)
})
}
return true
})
}
// EachConn is an iterator with args (base, po, f) that applies f to each live peer
// that has proximity order po or less as measured from the base
// if base is nil, kademlia base address is used
func (k *Kademlia) EachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachConn(base, o, f)
}
func (k *Kademlia) eachConn(base []byte, o int, f func(OverlayConn, int, bool) bool) {
if len(base) == 0 {
base = k.base
}
depth := k.neighbourhoodDepth()
k.conns.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
return f(val.(*entry).conn(), po, po >= depth)
})
}
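// Illustrative usage (not part of the original source): print every live
// connection with its proximity order, measured from this node's own base
// address (base = nil) and with no effective upper bound on po (o = 255).
//
//	k.EachConn(nil, 255, func(p OverlayConn, po int, isNN bool) bool {
//		fmt.Printf("peer %08x at PO %03d, nearest neighbour: %v\n", p.Address()[:4], po, isNN)
//		return true // returning false stops the iteration early
//	})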
// EachAddr called with (base, po, f) is an iterator applying f to each known peer
// that has proximity order po or less as measured from the base
// if base is nil, kademlia base address is used
func (k *Kademlia) EachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) {
k.lock.RLock()
defer k.lock.RUnlock()
k.eachAddr(base, o, f)
}
func (k *Kademlia) eachAddr(base []byte, o int, f func(OverlayAddr, int, bool) bool) {
if len(base) == 0 {
base = k.base
}
depth := k.neighbourhoodDepth()
k.addrs.EachNeighbour(base, pof, func(val pot.Val, po int) bool {
if po > o {
return true
}
return f(val.(*entry).addr(), po, po >= depth)
})
}
// neighbourhoodDepth returns the proximity order that defines the distance of
// the nearest neighbour set with cardinality >= MinProxBinSize
// if there are altogether fewer than MinProxBinSize peers it returns 0
// caller must hold the lock
func (k *Kademlia) neighbourhoodDepth() (depth int) {
if k.conns.Size() < k.MinProxBinSize {
return 0
}
var size int
f := func(v pot.Val, i int) bool {
size++
depth = i
return size < k.MinProxBinSize
}
k.conns.EachNeighbour(k.base, pof, f)
return depth
}
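// Worked example (illustrative): with MinProxBinSize = 2 and live connections
// at proximity orders 5, 3 and 1 from the base, EachNeighbour visits them
// nearest first (PO 5, then 3, then 1); the count reaches MinProxBinSize at
// the PO 3 peer, so neighbourhoodDepth returns 3 and every peer at PO >= 3
// is treated as a nearest neighbour.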
// callable checks whether the peer entry in val is available to be called (dialed)
func (k *Kademlia) callable(val pot.Val) OverlayAddr {
e := val.(*entry)
// not callable if peer is live or exceeded maxRetries
if e.conn() != nil || e.retries > k.MaxRetries {
return nil
}
// calculate the allowed number of retries based on time lapsed since last seen
timeAgo := int64(time.Since(e.seenAt))
div := int64(k.RetryExponent)
div += (150000 - rand.Int63n(300000)) * div / 1000000
var retries int
for delta := timeAgo; delta > k.RetryInterval; delta /= div {
retries++
}
// this is never called concurrently, so safe to increment
// peer can be retried again
if retries < e.retries {
log.Trace(fmt.Sprintf("%08x: %v long time since last try (at %v) needed before retry %v, wait only warrants %v", k.BaseAddr()[:4], e, timeAgo, e.retries, retries))
return nil
}
// function to sanction or prevent suggesting a peer
if k.Reachable != nil && !k.Reachable(e.addr()) {
log.Trace(fmt.Sprintf("%08x: peer %v is temporarily not callable", k.BaseAddr()[:4], e))
return nil
}
e.retries++
log.Trace(fmt.Sprintf("%08x: peer %v is callable", k.BaseAddr()[:4], e))
return e.addr()
}
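// Worked example (illustrative): with the defaults RetryInterval = 4.2s and
// RetryExponent = 2 (jittered by +-15%), a peer last seen 10s ago gives
// delta = 10s -> 5s -> 2.5s, i.e. two halvings above the 4.2s threshold, so
// the wait warrants retries = 2. The peer is suggested again only while
// e.retries <= 2; each suggestion increments e.retries, which yields an
// exponential backoff between redial attempts.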
// BaseAddr return the kademlia base address
func (k *Kademlia) BaseAddr() []byte {
return k.base
}
// String returns kademlia table + kaddb table displayed with ascii
func (k *Kademlia) String() string {
k.lock.RLock()
defer k.lock.RUnlock()
return k.string()
}
// String returns kademlia table + kaddb table displayed with ascii
func (k *Kademlia) string() string {
wsrow := " "
var rows []string
rows = append(rows, "=========================================================================")
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()[:3]))
rows = append(rows, fmt.Sprintf("population: %d (%d), MinProxBinSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.MinProxBinSize, k.MinBinSize, k.MaxBinSize))
liverows := make([]string, k.MaxProxDisplay)
peersrows := make([]string, k.MaxProxDisplay)
depth := k.neighbourhoodDepth()
rest := k.conns.Size()
k.conns.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
}
row := []string{fmt.Sprintf("%2d", size)}
rest -= size
f(func(val pot.Val, vpo int) bool {
e := val.(*entry)
row = append(row, fmt.Sprintf("%x", e.Address()[:2]))
rowlen++
return rowlen < 4
})
r := strings.Join(row, " ")
r = r + wsrow
liverows[po] = r[:31]
return true
})
k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
var rowlen int
if po >= k.MaxProxDisplay {
po = k.MaxProxDisplay - 1
}
if size < 0 {
panic("wtf")
}
row := []string{fmt.Sprintf("%2d", size)}
// we are displaying live peers too
f(func(val pot.Val, vpo int) bool {
e := val.(*entry)
row = append(row, Label(e))
rowlen++
return rowlen < 4
})
peersrows[po] = strings.Join(row, " ")
return true
})
for i := 0; i < k.MaxProxDisplay; i++ {
if i == depth {
rows = append(rows, fmt.Sprintf("============ DEPTH: %d ==========================================", i))
}
left := liverows[i]
right := peersrows[i]
if len(left) == 0 {
left = " 0 "
}
if len(right) == 0 {
right = " 0"
}
rows = append(rows, fmt.Sprintf("%03d %v | %v", i, left, right))
}
rows = append(rows, "=========================================================================")
return "\n" + strings.Join(rows, "\n")
}
// PeerPot keeps info about expected nearest neighbours and empty bins
// used for testing only
type PeerPot struct {
NNSet [][]byte
EmptyBins []int
}
// NewPeerPotMap creates a map of pot records of OverlayAddr, keyed by the
// hexadecimal representation of the address.
func NewPeerPotMap(kadMinProxSize int, addrs [][]byte) map[string]*PeerPot {
// create a table of all nodes for health check
np := pot.NewPot(nil, 0)
for _, addr := range addrs {
np, _, _ = pot.Add(np, addr, pof)
}
ppmap := make(map[string]*PeerPot)
for i, a := range addrs {
pl := 256
prev := 256
var emptyBins []int
var nns [][]byte
np.EachNeighbour(addrs[i], pof, func(val pot.Val, po int) bool {
a := val.([]byte)
if po == 256 {
return true
}
if pl == 256 || pl == po {
nns = append(nns, a)
}
if pl == 256 && len(nns) >= kadMinProxSize {
pl = po
prev = po
}
if prev < pl {
for j := prev; j > po; j-- {
emptyBins = append(emptyBins, j)
}
}
prev = po - 1
return true
})
for j := prev; j >= 0; j-- {
emptyBins = append(emptyBins, j)
}
log.Trace(fmt.Sprintf("%x NNS: %s", addrs[i][:4], LogAddrs(nns)))
ppmap[common.Bytes2Hex(a)] = &PeerPot{nns, emptyBins}
}
return ppmap
}
// saturation returns the lowest proximity order such that the bin for that
// order has fewer than n peers
func (k *Kademlia) saturation(n int) int {
prev := -1
k.addrs.EachBin(k.base, pof, 0, func(po, size int, f func(func(val pot.Val, i int) bool) bool) bool {
prev++
return prev == po && size >= n
})
depth := k.neighbourhoodDepth()
if depth < prev {
return depth
}
return prev
}
// full returns true if all required bins have connected peers.
// It is used in Healthy function.
func (k *Kademlia) full(emptyBins []int) (full bool) {
prev := 0
e := len(emptyBins)
ok := true
depth := k.neighbourhoodDepth()
k.conns.EachBin(k.base, pof, 0, func(po, _ int, _ func(func(val pot.Val, i int) bool) bool) bool {
if prev == depth+1 {
return true
}
for i := prev; i < po; i++ {
e--
if e < 0 {
ok = false
return false
}
if emptyBins[e] != i {
log.Trace(fmt.Sprintf("%08x po: %d, i: %d, e: %d, emptybins: %v", k.BaseAddr()[:4], po, i, e, logEmptyBins(emptyBins)))
if emptyBins[e] < i {
panic("incorrect peerpot")
}
ok = false
return false
}
}
prev = po + 1
return true
})
if !ok {
return false
}
return e == 0
}
func (k *Kademlia) knowNearestNeighbours(peers [][]byte) bool {
pm := make(map[string]bool)
k.eachAddr(nil, 255, func(p OverlayAddr, po int, nn bool) bool {
if !nn {
return false
}
pk := fmt.Sprintf("%x", p.Address())
pm[pk] = true
return true
})
for _, p := range peers {
pk := fmt.Sprintf("%x", p)
if !pm[pk] {
log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.BaseAddr()[:4], pk[:8]))
return false
}
}
return true
}
func (k *Kademlia) gotNearestNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
pm := make(map[string]bool)
k.eachConn(nil, 255, func(p OverlayConn, po int, nn bool) bool {
if !nn {
return false
}
pk := fmt.Sprintf("%x", p.Address())
pm[pk] = true
return true
})
var gots int
var culprits [][]byte
for _, p := range peers {
pk := fmt.Sprintf("%x", p)
if pm[pk] {
gots++
} else {
log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.BaseAddr()[:4], pk[:8]))
culprits = append(culprits, p)
}
}
return gots == len(peers), gots, culprits
}
// Health state of the Kademlia
type Health struct {
KnowNN bool // whether node knows all its nearest neighbours
GotNN bool // whether node is connected to all its nearest neighbours
CountNN int // amount of nearest neighbors connected to
CulpritsNN [][]byte // which known NNs are missing
Full bool // whether node has a peer in each kademlia bin (where there is such a peer)
Hive string
}
// Healthy reports the health state of the kademlia connectivity
// returns a Health struct
func (k *Kademlia) Healthy(pp *PeerPot) *Health {
k.lock.RLock()
defer k.lock.RUnlock()
gotnn, countnn, culpritsnn := k.gotNearestNeighbours(pp.NNSet)
knownn := k.knowNearestNeighbours(pp.NNSet)
full := k.full(pp.EmptyBins)
log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, full: %v\n", k.BaseAddr()[:4], knownn, gotnn, full))
return &Health{knownn, gotnn, countnn, culpritsnn, full, k.string()}
}
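// Illustrative usage (not part of the original source), mirroring how the
// simulation tests use it: build the expected nearest-neighbour sets for all
// known overlay addresses, then query one node's table for its health.
//
//	ppmap := NewPeerPotMap(k.MinProxBinSize, addrs) // addrs: every node's overlay address
//	h := k.Healthy(ppmap[common.Bytes2Hex(k.BaseAddr())])
//	healthy := h.KnowNN && h.GotNN && h.Full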
func logEmptyBins(ebs []int) string {
var ebss []string
for _, eb := range ebs {
ebss = append(ebss, fmt.Sprintf("%d", eb))
}
return strings.Join(ebss, ", ")
}

@@ -1,173 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package kademlia
import (
"fmt"
"math/rand"
"strings"
"github.com/ethereum/go-ethereum/common"
)
type Address common.Hash
func (a Address) String() string {
return fmt.Sprintf("%x", a[:])
}
func (a *Address) MarshalJSON() (out []byte, err error) {
return []byte(`"` + a.String() + `"`), nil
}
func (a *Address) UnmarshalJSON(value []byte) error {
*a = Address(common.HexToHash(string(value[1 : len(value)-1])))
return nil
}
// the string form of the binary representation of an address (only first 8 bits)
func (a Address) Bin() string {
var bs []string
for _, b := range a[:] {
bs = append(bs, fmt.Sprintf("%08b", b))
}
return strings.Join(bs, "")
}
/*
Proximity(x, y) returns the proximity order of the MSB distance between x and y
The distance metric MSB(x, y) of two equal length byte sequences x and y is the
value of the binary integer cast of the x^y, ie., x and y bitwise xor-ed.
the binary cast is big endian: most significant bit first (=MSB).
Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
It is defined as the reverse rank of the integer part of the base 2
logarithm of the distance.
It is calculated by counting the number of common leading zeros in the (MSB)
binary representation of the x^y.
(0 farthest, 255 closest, 256 self)
*/
func proximity(one, other Address) (ret int) {
for i := 0; i < len(one); i++ {
oxo := one[i] ^ other[i]
for j := 0; j < 8; j++ {
if (oxo>>uint8(7-j))&0x01 != 0 {
return i*8 + j
}
}
}
return len(one) * 8
}
// Address.ProxCmp compares the distances a->target and b->target.
// Returns -1 if a is closer to target, 1 if b is closer to target
// and 0 if they are equal.
func (target Address) ProxCmp(a, b Address) int {
for i := range target {
da := a[i] ^ target[i]
db := b[i] ^ target[i]
if da > db {
return 1
} else if da < db {
return -1
}
}
return 0
}
// RandomAddressAt(address, prox) generates a random address
// at proximity order prox relative to address
// if prox is negative a random address is generated
func RandomAddressAt(self Address, prox int) (addr Address) {
addr = self
var pos int
if prox >= 0 {
pos = prox / 8
trans := prox % 8
transbytea := byte(0)
for j := 0; j <= trans; j++ {
transbytea |= 1 << uint8(7-j)
}
flipbyte := byte(1 << uint8(7-trans))
transbyteb := transbytea ^ byte(255)
randbyte := byte(rand.Intn(255))
addr[pos] = ((addr[pos] & transbytea) ^ flipbyte) | randbyte&transbyteb
}
for i := pos + 1; i < len(addr); i++ {
addr[i] = byte(rand.Intn(255))
}
return
}
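// Worked example (illustrative): RandomAddressAt(self, 10) gives pos = 1 and
// trans = 2, so byte 0 and the top two bits of byte 1 are copied from self,
// bit 10 is flipped relative to self, and all remaining bits are randomised;
// by construction proximity(self, result) == 10.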
// KeyRange(one, other, proxLimit) returns the inclusive address
// range that contains addresses closer to one than to other
func KeyRange(one, other Address, proxLimit int) (start, stop Address) {
prox := proximity(one, other)
if prox >= proxLimit {
prox = proxLimit
}
start = CommonBitsAddrByte(one, other, byte(0x00), prox)
stop = CommonBitsAddrByte(one, other, byte(0xff), prox)
return
}
func CommonBitsAddrF(self, other Address, f func() byte, p int) (addr Address) {
prox := proximity(self, other)
var pos int
if p <= prox {
prox = p
}
pos = prox / 8
addr = self
trans := byte(prox % 8)
var transbytea byte
if p > prox {
transbytea = byte(0x7f)
} else {
transbytea = byte(0xff)
}
transbytea >>= trans
transbyteb := transbytea ^ byte(0xff)
addrpos := addr[pos]
addrpos &= transbyteb
if p > prox {
addrpos ^= byte(0x80 >> trans)
}
addrpos |= transbytea & f()
addr[pos] = addrpos
for i := pos + 1; i < len(addr); i++ {
addr[i] = f()
}
return
}
func CommonBitsAddr(self, other Address, prox int) (addr Address) {
return CommonBitsAddrF(self, other, func() byte { return byte(rand.Intn(255)) }, prox)
}
func CommonBitsAddrByte(self, other Address, b byte, prox int) (addr Address) {
return CommonBitsAddrF(self, other, func() byte { return b }, prox)
}
// RandomAddress() generates a random address
func RandomAddress() Address {
return RandomAddressAt(Address{}, -1)
}

@@ -1,96 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package kademlia
import (
"math/rand"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func (Address) Generate(rand *rand.Rand, size int) reflect.Value {
var id Address
for i := 0; i < len(id); i++ {
id[i] = byte(uint8(rand.Intn(255)))
}
return reflect.ValueOf(id)
}
func TestCommonBitsAddrF(t *testing.T) {
a := Address(common.HexToHash("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
b := Address(common.HexToHash("0x8123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
c := Address(common.HexToHash("0x4123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
d := Address(common.HexToHash("0x0023456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
e := Address(common.HexToHash("0x01A3456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"))
ab := CommonBitsAddrF(a, b, func() byte { return byte(0x00) }, 10)
expab := Address(common.HexToHash("0x8000000000000000000000000000000000000000000000000000000000000000"))
if ab != expab {
t.Fatalf("%v != %v", ab, expab)
}
ac := CommonBitsAddrF(a, c, func() byte { return byte(0x00) }, 10)
expac := Address(common.HexToHash("0x4000000000000000000000000000000000000000000000000000000000000000"))
if ac != expac {
t.Fatalf("%v != %v", ac, expac)
}
ad := CommonBitsAddrF(a, d, func() byte { return byte(0x00) }, 10)
expad := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if ad != expad {
t.Fatalf("%v != %v", ad, expad)
}
ae := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 10)
expae := Address(common.HexToHash("0x0180000000000000000000000000000000000000000000000000000000000000"))
if ae != expae {
t.Fatalf("%v != %v", ae, expae)
}
acf := CommonBitsAddrF(a, c, func() byte { return byte(0xff) }, 10)
expacf := Address(common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
if acf != expacf {
t.Fatalf("%v != %v", acf, expacf)
}
aeo := CommonBitsAddrF(a, e, func() byte { return byte(0x00) }, 2)
expaeo := Address(common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"))
if aeo != expaeo {
t.Fatalf("%v != %v", aeo, expaeo)
}
aep := CommonBitsAddrF(a, e, func() byte { return byte(0xff) }, 2)
expaep := Address(common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
if aep != expaep {
t.Fatalf("%v != %v", aep, expaep)
}
}
func TestRandomAddressAt(t *testing.T) {
var a Address
for i := 0; i < 100; i++ {
a = RandomAddress()
prox := rand.Intn(255)
b := RandomAddressAt(a, prox)
if proximity(a, b) != prox {
t.Fatalf("incorrect address prox(%v, %v) == %v (expected %v)", a, b, proximity(a, b), prox)
}
}
}

@@ -1,350 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package kademlia
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
)
type NodeData interface {
json.Marshaler
json.Unmarshaler
}
// allow inactive peers under
type NodeRecord struct {
Addr Address // address of node
Url string // Url, used to connect to node
After time.Time // next call after time
Seen time.Time // last connected at time
Meta *json.RawMessage // arbitrary metadata saved for a peer
node Node
}
func (self *NodeRecord) setSeen() {
t := time.Now()
self.Seen = t
self.After = t
}
func (self *NodeRecord) String() string {
return fmt.Sprintf("<%v>", self.Addr)
}
// persisted node record database ()
type KadDb struct {
Address Address
Nodes [][]*NodeRecord
index map[Address]*NodeRecord
cursors []int
lock sync.RWMutex
purgeInterval time.Duration
initialRetryInterval time.Duration
connRetryExp int
}
func newKadDb(addr Address, params *KadParams) *KadDb {
return &KadDb{
Address: addr,
Nodes: make([][]*NodeRecord, params.MaxProx+1), // overwritten by load
cursors: make([]int, params.MaxProx+1),
index: make(map[Address]*NodeRecord),
purgeInterval: params.PurgeInterval,
initialRetryInterval: params.InitialRetryInterval,
connRetryExp: params.ConnRetryExp,
}
}
func (self *KadDb) findOrCreate(index int, a Address, url string) *NodeRecord {
defer self.lock.Unlock()
self.lock.Lock()
record, found := self.index[a]
if !found {
record = &NodeRecord{
Addr: a,
Url: url,
}
log.Info(fmt.Sprintf("add new record %v to kaddb", record))
// insert in kaddb
self.index[a] = record
self.Nodes[index] = append(self.Nodes[index], record)
} else {
log.Info(fmt.Sprintf("found record %v in kaddb", record))
}
// update last seen time
record.setSeen()
// update with url in case IP/port changes
record.Url = url
return record
}
// add adds node records to kaddb (persisted node record db)
func (self *KadDb) add(nrs []*NodeRecord, proximityBin func(Address) int) {
defer self.lock.Unlock()
self.lock.Lock()
var n int
var nodes []*NodeRecord
for _, node := range nrs {
_, found := self.index[node.Addr]
if !found && node.Addr != self.Address {
node.setSeen()
self.index[node.Addr] = node
index := proximityBin(node.Addr)
dbcursor := self.cursors[index]
nodes = self.Nodes[index]
// this is inefficient for allocation, need to just append then shift
newnodes := make([]*NodeRecord, len(nodes)+1)
copy(newnodes[:], nodes[:dbcursor])
newnodes[dbcursor] = node
copy(newnodes[dbcursor+1:], nodes[dbcursor:])
log.Trace(fmt.Sprintf("new nodes: %v, nodes: %v", newnodes, nodes))
self.Nodes[index] = newnodes
n++
}
}
if n > 0 {
log.Debug(fmt.Sprintf("%d/%d node records (new/known)", n, len(nrs)))
}
}
/*
next return one node record with the highest priority for desired
connection.
This is used to pick candidates for live nodes that are most wanted for
a highly connected, low-centrality network structure for Swarm, which best suits
Kademlia-style routing.
* Starting as naive node with empty db, this implements Kademlia bootstrapping
* As a mature node, it fills short lines. All on demand.
The candidate is chosen using the following strategy:
We check for missing online nodes in the buckets for 1 up to max BucketSize rounds.
On each round we proceed from the low to high proximity order buckets.
If the number of active nodes (=connected peers) is < rounds, then start looking
for a known candidate. To determine if there is a candidate to recommend the
kaddb node record database row corresponding to the bucket is checked.
If the row cursor is on position i, the ith element in the row is chosen.
If the record is scheduled not to be retried before NOW, the next element is taken.
If the record is scheduled to be retried, it is set as checked, scheduled for
checking and is returned. The time of the next check is in X (duration) such that
X = ConnRetryExp * delta where delta is the time past since the last check and
ConnRetryExp is constant obsoletion factor. (Note that when node records are added
from peer messages, they are marked as checked and placed at the cursor, ie.
given priority over older entries). Entries which were checked more than
purgeInterval ago are deleted from the kaddb row. If no candidate is found after
a full round of checking the next bucket up is considered. If no candidate is
found when we reach the maximum-proximity bucket, the next round starts.
node record a is more favoured to b a > b iff a is a passive node (record of
offline past peer)
|proxBin(a)| < |proxBin(b)|
|| (proxBin(a) < proxBin(b) && |proxBin(a)| == |proxBin(b)|)
|| (proxBin(a) == proxBin(b) && lastChecked(a) < lastChecked(b))
The second argument returned names the first missing slot found
*/
func (self *KadDb) findBest(maxBinSize int, binSize func(int) int) (node *NodeRecord, need bool, proxLimit int) {
// return nil, proxLimit indicates that all buckets are filled
defer self.lock.Unlock()
self.lock.Lock()
var interval time.Duration
var found bool
var purge []bool
var delta time.Duration
var cursor int
var count int
var after time.Time
// iterate over columns maximum bucketsize times
for rounds := 1; rounds <= maxBinSize; rounds++ {
ROUND:
// iterate over rows from PO 0 upto MaxProx
for po, dbrow := range self.Nodes {
// if row has rounds connected peers, then take the next
if binSize(po) >= rounds {
continue ROUND
}
if !need {
// set proxlimit to the PO where the first missing slot is found
proxLimit = po
need = true
}
purge = make([]bool, len(dbrow))
// there is a missing slot - finding a node to connect to
// select a node record from the relevant kaddb row (of identical prox order)
ROW:
for cursor = self.cursors[po]; !found && count < len(dbrow); cursor = (cursor + 1) % len(dbrow) {
count++
node = dbrow[cursor]
// skip already connected nodes
if node.node != nil {
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d/%d) already connected", node.Addr, po, cursor, len(dbrow)))
continue ROW
}
// if node is scheduled to connect
if node.After.After(time.Now()) {
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) skipped. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
continue ROW
}
delta = time.Since(node.Seen)
if delta < self.initialRetryInterval {
delta = self.initialRetryInterval
}
if delta > self.purgeInterval {
// remove node
purge[cursor] = true
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) unreachable since %v. Removed", node.Addr, po, cursor, node.Seen))
continue ROW
}
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) ready to be tried. seen at %v (%v ago), scheduled at %v", node.Addr, po, cursor, node.Seen, delta, node.After))
// scheduling next check
interval = delta * time.Duration(self.connRetryExp)
after = time.Now().Add(interval)
log.Debug(fmt.Sprintf("kaddb record %v (PO%03d:%d) selected as candidate connection %v. seen at %v (%v ago), selectable since %v, retry after %v (in %v)", node.Addr, po, cursor, rounds, node.Seen, delta, node.After, after, interval))
node.After = after
found = true
} // ROW
self.cursors[po] = cursor
self.delete(po, purge)
if found {
return node, need, proxLimit
}
} // ROUND
} // ROUNDS
return nil, need, proxLimit
}
// deletes the noderecords of a kaddb row corresponding to the indexes
// caller must hold the dblock
// the call is unsafe, no index checks
func (self *KadDb) delete(row int, purge []bool) {
var nodes []*NodeRecord
dbrow := self.Nodes[row]
for i, del := range purge {
if i == self.cursors[row] {
//reset cursor
self.cursors[row] = len(nodes)
}
// delete the entry to be purged
if del {
delete(self.index, dbrow[i].Addr)
continue
}
// otherwise append to new list
nodes = append(nodes, dbrow[i])
}
self.Nodes[row] = nodes
}
// save persists kaddb on disk (written to file on path in json format).
func (self *KadDb) save(path string, cb func(*NodeRecord, Node)) error {
defer self.lock.Unlock()
self.lock.Lock()
var n int
for _, b := range self.Nodes {
for _, node := range b {
n++
node.After = time.Now()
node.Seen = time.Now()
if cb != nil {
cb(node, node.node)
}
}
}
data, err := json.MarshalIndent(self, "", " ")
if err != nil {
return err
}
err = ioutil.WriteFile(path, data, os.ModePerm)
if err != nil {
log.Warn(fmt.Sprintf("unable to save kaddb with %v nodes to %v: %v", n, path, err))
} else {
log.Info(fmt.Sprintf("saved kaddb with %v nodes to %v", n, path))
}
return err
}
// load(path) loads the node record database (kaddb) from the file on path.
func (self *KadDb) load(path string, cb func(*NodeRecord, Node) error) (err error) {
defer self.lock.Unlock()
self.lock.Lock()
var data []byte
data, err = ioutil.ReadFile(path)
if err != nil {
return
}
err = json.Unmarshal(data, self)
if err != nil {
return
}
var n int
var purge []bool
for po, b := range self.Nodes {
purge = make([]bool, len(b))
ROW:
for i, node := range b {
if cb != nil {
err = cb(node, node.node)
if err != nil {
purge[i] = true
continue ROW
}
}
n++
if node.After.IsZero() {
node.After = time.Now()
}
self.index[node.Addr] = node
}
self.delete(po, purge)
}
log.Info(fmt.Sprintf("loaded kaddb with %v nodes from %v", n, path))
return
}
// accessor for KAD offline db count
func (self *KadDb) count() int {
defer self.lock.Unlock()
self.lock.Lock()
return len(self.index)
}

@@ -1,454 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package kademlia
import (
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
//metrics variables
//For metrics, we want to count how many times peers are added/removed
//at a certain index. Thus we do that with an array of counters with
//entry for each index
var (
bucketAddIndexCount []metrics.Counter
bucketRmIndexCount []metrics.Counter
)
const (
bucketSize = 4
proxBinSize = 2
maxProx = 8
connRetryExp = 2
maxPeers = 100
)
var (
purgeInterval = 42 * time.Hour
initialRetryInterval = 42 * time.Millisecond
maxIdleInterval = 42 * 1000 * time.Millisecond
// maxIdleInterval = 42 * 10 0 * time.Millisecond
)
type KadParams struct {
// adjustable parameters
MaxProx int
ProxBinSize int
BucketSize int
PurgeInterval time.Duration
InitialRetryInterval time.Duration
MaxIdleInterval time.Duration
ConnRetryExp int
}
func NewDefaultKadParams() *KadParams {
return &KadParams{
MaxProx: maxProx,
ProxBinSize: proxBinSize,
BucketSize: bucketSize,
PurgeInterval: purgeInterval,
InitialRetryInterval: initialRetryInterval,
MaxIdleInterval: maxIdleInterval,
ConnRetryExp: connRetryExp,
}
}
// Kademlia is a table of active nodes
type Kademlia struct {
addr Address // immutable baseaddress of the table
*KadParams // Kademlia configuration parameters
proxLimit int // state, the PO of the first row of the most proximate bin
proxSize int // state, the number of peers in the most proximate bin
count int // number of active peers (w live connection)
buckets [][]Node // the actual bins
db *KadDb // kaddb, node record database
lock sync.RWMutex // mutex to access buckets
}
type Node interface {
Addr() Address
Url() string
LastActive() time.Time
Drop()
}
// public constructor
// addr is the base address of the table
// params is the KadParams configuration
func New(addr Address, params *KadParams) *Kademlia {
buckets := make([][]Node, params.MaxProx+1)
kad := &Kademlia{
addr: addr,
KadParams: params,
buckets: buckets,
db: newKadDb(addr, params),
}
kad.initMetricsVariables()
return kad
}
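// Illustrative usage (a hedged sketch: someNode stands for any value
// implementing the Node interface; RandomAddress is the package helper
// used by the tests):
//
//	kad := New(RandomAddress(), NewDefaultKadParams())
//	if err := kad.On(someNode, nil); err != nil {
//		log.Warn(fmt.Sprintf("could not activate node: %v", err))
//	}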
// accessor for KAD base address
func (self *Kademlia) Addr() Address {
return self.addr
}
// accessor for KAD active node count
func (self *Kademlia) Count() int {
self.lock.Lock()
defer self.lock.Unlock()
return self.count
}
// accessor for KAD offline db (kaddb) count
func (self *Kademlia) DBCount() int {
return self.db.count()
}
// On is the entry point called when a new node is added.
// unsafe in that the node is not checked to be an already active node (to be called only once)
func (self *Kademlia) On(node Node, cb func(*NodeRecord, Node) error) (err error) {
log.Debug(fmt.Sprintf("%v", self))
self.lock.Lock()
defer self.lock.Unlock()
index := self.proximityBin(node.Addr())
record := self.db.findOrCreate(index, node.Addr(), node.Url())
if cb != nil {
err = cb(record, node)
log.Trace(fmt.Sprintf("cb(%v, %v) ->%v", record, node, err))
if err != nil {
return fmt.Errorf("unable to add node %v, callback error: %v", node.Addr(), err)
}
log.Debug(fmt.Sprintf("add node record %v with node %v", record, node))
}
// insert in kademlia table of active nodes
bucket := self.buckets[index]
// if the bucket is full, insertion replaces the worst node
// TODO: give priority to peers with active traffic
if len(bucket) < self.BucketSize { // bucket not full: simply append the node
self.buckets[index] = append(bucket, node)
bucketAddIndexCount[index].Inc(1)
log.Debug(fmt.Sprintf("add node %v to table", node))
self.setProxLimit(index, true)
record.node = node
self.count++
return nil
}
// bucket full: rotate out the most idle peer
idle := self.MaxIdleInterval
var pos int
var replaced Node
for i, p := range bucket {
idleInt := time.Since(p.LastActive())
if idleInt > idle {
idle = idleInt
pos = i
replaced = p
}
}
if replaced == nil {
log.Debug(fmt.Sprintf("all peers wanted, PO%03d bucket full", index))
return fmt.Errorf("bucket full")
}
log.Debug(fmt.Sprintf("node %v replaced by %v (idle for %v > %v)", replaced, node, idle, self.MaxIdleInterval))
replaced.Drop()
// actually replace in the row. When off(node) is called, the peer is no longer in the row
bucket[pos] = node
// there is no change in bucket cardinalities so no prox limit adjustment is needed
record.node = node
self.count++
return nil
}
// Off is called when a node is taken offline (from the protocol main loop exit)
func (self *Kademlia) Off(node Node, cb func(*NodeRecord, Node)) (err error) {
self.lock.Lock()
defer self.lock.Unlock()
index := self.proximityBin(node.Addr())
bucketRmIndexCount[index].Inc(1)
bucket := self.buckets[index]
for i := 0; i < len(bucket); i++ {
if node.Addr() == bucket[i].Addr() {
self.buckets[index] = append(bucket[:i], bucket[(i+1):]...)
self.setProxLimit(index, false)
break
}
}
record := self.db.index[node.Addr()]
if record == nil {
// node was never registered, nothing to update
return
}
// callback on remove
if cb != nil {
cb(record, record.node)
}
record.node = nil
self.count--
log.Debug(fmt.Sprintf("remove node %v from table, population now is %v", node, self.count))
return
}
// proxLimit is dynamically adjusted so that
// 1) there are no empty buckets in bins < proxLimit and
// 2) the sum of all items is the minimum possible but higher than ProxBinSize
// adjusts proxLimit and proxSize after an insertion/removal of nodes
// caller holds the lock
func (self *Kademlia) setProxLimit(r int, on bool) {
// if the change is outside the core (PO lower)
// and the change does not leave a bucket empty then
// no adjustment needed
if r < self.proxLimit && len(self.buckets[r]) > 0 {
return
}
// if on is true (a node was added), then r must be within the prox limit, so increment cardinality
if on {
self.proxSize++
curr := len(self.buckets[self.proxLimit])
// if the core is now big enough without the furthest bucket, then contract
// this can result in more than one bucket change
for self.proxSize >= self.ProxBinSize+curr && curr > 0 {
self.proxSize -= curr
self.proxLimit++
curr = len(self.buckets[self.proxLimit])
log.Trace(fmt.Sprintf("proxbin contraction (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r))
}
return
}
// otherwise
if r >= self.proxLimit {
self.proxSize--
}
// expand the core by lowering the prox limit until it hits zero,
// covers the empty bucket, or reaches the target cardinality
for (self.proxSize < self.ProxBinSize || r < self.proxLimit) &&
self.proxLimit > 0 {
self.proxLimit--
self.proxSize += len(self.buckets[self.proxLimit])
log.Trace(fmt.Sprintf("proxbin expansion (size: %v, limit: %v, bin: %v)", self.proxSize, self.proxLimit, r))
}
}
/*
FindClosest returns the list of nodes belonging to the same proximity bin
as the target. The most proximate bin is the union of the bins between
proxLimit and MaxProx.
*/
func (self *Kademlia) FindClosest(target Address, max int) []Node {
self.lock.Lock()
defer self.lock.Unlock()
r := nodesByDistance{
target: target,
}
po := self.proximityBin(target)
index := po
step := 1
log.Trace(fmt.Sprintf("serving %v nodes at %v (PO%02d)", max, index, po))
// if max is set to 0, we just want a full bucket, a dynamic number of peers
min := max
// set limit to max
limit := max
if max == 0 {
min = 1
limit = maxPeers
}
var n int
for index >= 0 {
// add entire bucket
for _, p := range self.buckets[index] {
r.push(p, limit)
n++
}
// terminate if index reached the bottom or enough peers > min
log.Trace(fmt.Sprintf("add %v -> %v (PO%02d, PO%03d)", len(self.buckets[index]), n, index, po))
if n >= min && (step < 0 || max == 0) {
break
}
// reach top most non-empty PO bucket, turn around
if index == self.MaxProx {
index = po
step = -1
}
index += step
}
log.Trace(fmt.Sprintf("serve %d (<=%d) nodes for target lookup %v (PO%03d)", n, max, target, po))
return r.nodes
}
func (self *Kademlia) Suggest() (*NodeRecord, bool, int) {
self.lock.RLock()
defer self.lock.RUnlock()
return self.db.findBest(self.BucketSize, func(i int) int { return len(self.buckets[i]) })
}
// adds node records to kaddb (persisted node record db)
func (self *Kademlia) Add(nrs []*NodeRecord) {
self.db.add(nrs, self.proximityBin)
}
// nodesByDistance is a list of nodes, ordered by distance to target.
type nodesByDistance struct {
nodes []Node
target Address
}
func sortedByDistanceTo(target Address, slice []Node) bool {
var last Address
for i, node := range slice {
if i > 0 {
if target.ProxCmp(node.Addr(), last) < 0 {
return false
}
}
last = node.Addr()
}
return true
}
// push(node, max) adds the given node to the list, keeping the total size
// below max elements.
func (h *nodesByDistance) push(node Node, max int) {
// returns the first index ix such that func(i) returns true
ix := sort.Search(len(h.nodes), func(i int) bool {
return h.target.ProxCmp(h.nodes[i].Addr(), node.Addr()) >= 0
})
if len(h.nodes) < max {
h.nodes = append(h.nodes, node)
}
if ix < len(h.nodes) {
copy(h.nodes[ix+1:], h.nodes[ix:])
h.nodes[ix] = node
}
}
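// The loop above is a bounded insertion-sort step. A self-contained sketch
// of the same pattern, with ints standing in for nodes (smaller value =
// closer to the target; pushSorted is an illustrative name, not part of
// this package):
//
//	func pushSorted(list []int, v, max int) []int {
//		ix := sort.Search(len(list), func(i int) bool { return list[i] >= v })
//		if len(list) < max {
//			list = append(list, 0) // grow by one slot
//		}
//		if ix < len(list) {
//			copy(list[ix+1:], list[ix:]) // shift the tail right
//			list[ix] = v                 // insert at sorted position
//		}
//		return list // a value farther than all entries of a full list is dropped
//	}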
/*
Taking the proximity order relative to a fixed point x classifies the points
in the space (n byte long byte sequences) into bins. Items in each bin are at
most half as distant from x as items in the previous bin. Given a sample of
uniformly distributed items (a hash function over arbitrary sequences), the
proximity scale maps onto a series of subsets with cardinalities on a negative
exponential scale.
It also has the property that any two items belonging to the same bin are at
most half as distant from each other as they are from x.
If we think of a random sample of items in the bins as connections in a
network of interconnected nodes, then relative proximity can serve as the
basis for local decisions for graph traversal, where the task is to find a
route between two points. Since the finite distance halves in every hop,
there is a guaranteed constant maximum limit on the number of hops needed to
reach one node from the other.
*/
func (self *Kademlia) proximityBin(other Address) (ret int) {
ret = proximity(self.addr, other)
if ret > self.MaxProx {
ret = self.MaxProx
}
return
}
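// A self-contained sketch of the MSB-first XOR proximity order that the
// proximity helper used above is assumed to compute (the real helper lives
// elsewhere in this package; this standalone version is for illustration
// only):
//
//	import "math/bits"
//
//	// proximityOrder counts the leading zero bits of a XOR b:
//	// the higher the value, the closer the two addresses.
//	func proximityOrder(a, b []byte) int {
//		for i := range a {
//			if x := a[i] ^ b[i]; x != 0 {
//				return i*8 + bits.LeadingZeros8(x)
//			}
//		}
//		return len(a) * 8 // identical addresses
//	}
//
//	proximityOrder([]byte{0x80}, []byte{0x81}) // 7: first difference in bit 7
//	proximityOrder([]byte{0x80}, []byte{0x00}) // 0: first difference in bit 0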
// provides keyrange for chunk db iteration
func (self *Kademlia) KeyRange(other Address) (start, stop Address) {
self.lock.RLock()
defer self.lock.RUnlock()
return KeyRange(self.addr, other, self.proxLimit)
}
// Save persists the kaddb on disk (written to the file at path in JSON format).
func (self *Kademlia) Save(path string, cb func(*NodeRecord, Node)) error {
return self.db.save(path, cb)
}
// Load loads the node record database (kaddb) from the file at path.
func (self *Kademlia) Load(path string, cb func(*NodeRecord, Node) error) (err error) {
return self.db.load(path, cb)
}
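// Illustrative persistence round trip (a sketch; the path and the nil
// callbacks are placeholders):
//
//	if err := kad.Save("/tmp/kaddb.json", nil); err != nil {
//		log.Warn(fmt.Sprintf("save failed: %v", err))
//	}
//	restored := New(kad.Addr(), NewDefaultKadParams())
//	if err := restored.Load("/tmp/kaddb.json", nil); err != nil {
//		log.Warn(fmt.Sprintf("load failed: %v", err))
//	}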
// kademlia table + kaddb table displayed with ascii
func (self *Kademlia) String() string {
self.lock.RLock()
defer self.lock.RUnlock()
self.db.lock.RLock()
defer self.db.lock.RUnlock()
var rows []string
rows = append(rows, "=========================================================================")
rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %v", time.Now().UTC().Format(time.UnixDate), self.addr.String()[:6]))
rows = append(rows, fmt.Sprintf("population: %d (%d), proxLimit: %d, proxSize: %d", self.count, len(self.db.index), self.proxLimit, self.proxSize))
rows = append(rows, fmt.Sprintf("MaxProx: %d, ProxBinSize: %d, BucketSize: %d", self.MaxProx, self.ProxBinSize, self.BucketSize))
for i, bucket := range self.buckets {
if i == self.proxLimit {
rows = append(rows, fmt.Sprintf("============ PROX LIMIT: %d ==========================================", i))
}
row := []string{fmt.Sprintf("%03d", i), fmt.Sprintf("%2d", len(bucket))}
var k int
c := self.db.cursors[i]
for ; k < len(bucket); k++ {
p := bucket[(c+k)%len(bucket)]
row = append(row, p.Addr().String()[:6])
if k == 4 {
break
}
}
for ; k < 4; k++ {
row = append(row, " ")
}
row = append(row, fmt.Sprintf("| %2d %2d", len(self.db.Nodes[i]), self.db.cursors[i]))
for j, p := range self.db.Nodes[i] {
row = append(row, p.Addr.String()[:6])
if j == 3 {
break
}
}
rows = append(rows, strings.Join(row, " "))
}
rows = append(rows, "=========================================================================")
return strings.Join(rows, "\n")
}
// initMetricsVariables builds the per-index counter arrays
func (self *Kademlia) initMetricsVariables() {
// create the arrays
bucketAddIndexCount = make([]metrics.Counter, self.MaxProx+1)
bucketRmIndexCount = make([]metrics.Counter, self.MaxProx+1)
// at each index create a metrics counter
for i := 0; i < (self.KadParams.MaxProx + 1); i++ {
bucketAddIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.add.%d.index", i), nil)
bucketRmIndexCount[i] = metrics.NewRegisteredCounter(fmt.Sprintf("network.kademlia.bucket.rm.%d.index", i), nil)
}
}

@ -1,392 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package kademlia
import (
"fmt"
"math"
"math/rand"
"os"
"path/filepath"
"reflect"
"testing"
"testing/quick"
"time"
)
var (
quickrand = rand.New(rand.NewSource(time.Now().Unix()))
quickcfgFindClosest = &quick.Config{MaxCount: 50, Rand: quickrand}
quickcfgBootStrap = &quick.Config{MaxCount: 100, Rand: quickrand}
)
type testNode struct {
addr Address
}
func (n *testNode) String() string {
return fmt.Sprintf("%x", n.addr[:])
}
func (n *testNode) Addr() Address {
return n.addr
}
func (n *testNode) Drop() {
}
func (n *testNode) Url() string {
return ""
}
func (n *testNode) LastActive() time.Time {
return time.Now()
}
func TestOn(t *testing.T) {
addr, ok1 := gen(Address{}, quickrand).(Address)
other, ok2 := gen(Address{}, quickrand).(Address)
if !ok1 || !ok2 {
t.Errorf("oops")
}
kad := New(addr, NewDefaultKadParams())
if err := kad.On(&testNode{addr: other}, nil); err != nil {
t.Errorf("On failed: %v", err)
}
}
func TestBootstrap(t *testing.T) {
test := func(test *bootstrapTest) bool {
// the property must hold for any generated Self, MaxProx and BucketSize
params := NewDefaultKadParams()
params.MaxProx = test.MaxProx
params.BucketSize = test.BucketSize
params.ProxBinSize = test.BucketSize
kad := New(test.Self, params)
var err error
for p := 0; p < 9; p++ {
var nrs []*NodeRecord
n := math.Pow(float64(2), float64(7-p))
for i := 0; i < int(n); i++ {
addr := RandomAddressAt(test.Self, p)
nrs = append(nrs, &NodeRecord{
Addr: addr,
})
}
kad.Add(nrs)
}
node := &testNode{test.Self}
n := 0
for n < 100 {
err = kad.On(node, nil)
if err != nil {
t.Fatalf("backend not accepting node: %v", err)
}
record, need, _ := kad.Suggest()
if !need {
break
}
n++
if record == nil {
continue
}
node = &testNode{record.Addr}
}
exp := test.BucketSize * (test.MaxProx + 1)
if kad.Count() != exp {
t.Errorf("incorrect number of peers, expected %d, got %d\n%v", exp, kad.Count(), kad)
return false
}
return true
}
if err := quick.Check(test, quickcfgBootStrap); err != nil {
t.Error(err)
}
}
func TestFindClosest(t *testing.T) {
test := func(test *FindClosestTest) bool {
// for any node table, Target and N
params := NewDefaultKadParams()
params.MaxProx = 7
kad := New(test.Self, params)
var err error
for _, node := range test.All {
err = kad.On(node, nil)
if err != nil && err.Error() != "bucket full" {
t.Fatalf("backend not accepting node: %v", err)
}
}
if len(test.All) == 0 || test.N == 0 {
return true
}
nodes := kad.FindClosest(test.Target, test.N)
// check that the number of results is min(N, table size)
wantN := test.N
if tlen := kad.Count(); tlen < test.N {
wantN = tlen
}
if len(nodes) != wantN {
t.Errorf("wrong number of nodes: got %d, want %d", len(nodes), wantN)
return false
}
if hasDuplicates(nodes) {
t.Errorf("result contains duplicates")
return false
}
if !sortedByDistanceTo(test.Target, nodes) {
t.Errorf("result is not sorted by distance to target")
return false
}
// check that the result nodes have minimum distance to target.
farthestResult := nodes[len(nodes)-1].Addr()
for _, b := range kad.buckets {
for _, n := range b {
if contains(nodes, n.Addr()) {
continue // don't run the check below for nodes in result
}
if test.Target.ProxCmp(n.Addr(), farthestResult) < 0 {
t.Errorf("table contains a node that is closer to the target but is not in the result")
return false
}
}
}
return true
}
if err := quick.Check(test, quickcfgFindClosest); err != nil {
t.Error(err)
}
}
type proxTest struct {
add bool
index int
addr Address
}
var (
addresses []Address
)
func TestProxAdjust(t *testing.T) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
self := gen(Address{}, r).(Address)
params := NewDefaultKadParams()
params.MaxProx = 7
kad := New(self, params)
var err error
for i := 0; i < 100; i++ {
a := gen(Address{}, r).(Address)
addresses = append(addresses, a)
err = kad.On(&testNode{addr: a}, nil)
if err != nil && err.Error() != "bucket full" {
t.Fatalf("backend not accepting node: %v", err)
}
if !kad.proxCheck(t) {
return
}
}
test := func(test *proxTest) bool {
node := &testNode{test.addr}
if test.add {
kad.On(node, nil)
} else {
kad.Off(node, nil)
}
return kad.proxCheck(t)
}
if err := quick.Check(test, quickcfgFindClosest); err != nil {
t.Error(err)
}
}
func TestSaveLoad(t *testing.T) {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
addresses := gen([]Address{}, r).([]Address)
self := RandomAddress()
params := NewDefaultKadParams()
params.MaxProx = 7
kad := New(self, params)
var err error
for _, a := range addresses {
err = kad.On(&testNode{addr: a}, nil)
if err != nil && err.Error() != "bucket full" {
t.Fatalf("backend not accepting node: %v", err)
}
}
nodes := kad.FindClosest(self, 100)
path := filepath.Join(os.TempDir(), "bzz-kad-test-save-load.peers")
err = kad.Save(path, nil)
if err != nil {
t.Fatalf("unexpected error saving kaddb: %v", err)
}
kad = New(self, params)
err = kad.Load(path, nil)
if err != nil {
t.Fatalf("unexpected error loading kaddb: %v", err)
}
for _, b := range kad.db.Nodes {
for _, node := range b {
err = kad.On(&testNode{node.Addr}, nil)
if err != nil && err.Error() != "bucket full" {
t.Fatalf("backend not accepting node: %v", err)
}
}
}
loadednodes := kad.FindClosest(self, 100)
for i, node := range loadednodes {
if nodes[i].Addr() != node.Addr() {
t.Errorf("node mismatch at %d/%d: %v != %v", i, len(nodes), nodes[i].Addr(), node.Addr())
}
}
}
func (self *Kademlia) proxCheck(t *testing.T) bool {
var sum int
for i, b := range self.buckets {
l := len(b)
// if we are in the high prox multibucket
if i >= self.proxLimit {
sum += l
} else if l == 0 {
t.Errorf("bucket %d empty, yet proxLimit is %d\n%v", len(b), self.proxLimit, self)
return false
}
}
// check if merged high prox bucket does not exceed size
if sum > 0 {
if sum != self.proxSize {
t.Errorf("proxSize incorrect, expected %v, got %v", sum, self.proxSize)
return false
}
last := len(self.buckets[self.proxLimit])
if last > 0 && sum >= self.ProxBinSize+last {
t.Errorf("proxLimit %v incorrect, redundant non-empty bucket %d added to proxBin with %v (target %v)\n%v", self.proxLimit, last, sum-last, self.ProxBinSize, self)
return false
}
if self.proxLimit > 0 && sum < self.ProxBinSize {
t.Errorf("proxLimit %v incorrect. proxSize %v is less than target %v, yet there is more peers", self.proxLimit, sum, self.ProxBinSize)
return false
}
}
return true
}
type bootstrapTest struct {
MaxProx int
BucketSize int
Self Address
}
func (*bootstrapTest) Generate(rand *rand.Rand, size int) reflect.Value {
t := &bootstrapTest{
Self: gen(Address{}, rand).(Address),
MaxProx: 5 + rand.Intn(2),
BucketSize: rand.Intn(3) + 1,
}
return reflect.ValueOf(t)
}
type FindClosestTest struct {
Self Address
Target Address
All []Node
N int
}
func (c FindClosestTest) String() string {
return fmt.Sprintf("A: %064x\nT: %064x\n(%d)\n", c.Self[:], c.Target[:], c.N)
}
func (*FindClosestTest) Generate(rand *rand.Rand, size int) reflect.Value {
t := &FindClosestTest{
Self: gen(Address{}, rand).(Address),
Target: gen(Address{}, rand).(Address),
N: rand.Intn(bucketSize),
}
for _, a := range gen([]Address{}, rand).([]Address) {
t.All = append(t.All, &testNode{addr: a})
}
return reflect.ValueOf(t)
}
func (*proxTest) Generate(rand *rand.Rand, size int) reflect.Value {
var add bool
if rand.Intn(2) == 0 {
add = true
}
var t *proxTest
if add {
t = &proxTest{
addr: gen(Address{}, rand).(Address),
add: add,
}
} else {
t = &proxTest{
index: rand.Intn(len(addresses)),
add: add,
}
}
return reflect.ValueOf(t)
}
func hasDuplicates(slice []Node) bool {
seen := make(map[Address]bool)
for _, node := range slice {
if seen[node.Addr()] {
return true
}
seen[node.Addr()] = true
}
return false
}
func contains(nodes []Node, addr Address) bool {
for _, n := range nodes {
if n.Addr() == addr {
return true
}
}
return false
}
// gen wraps quick.Value so it's easier to use.
// it generates a random value of the given value's type.
func gen(typ interface{}, rand *rand.Rand) interface{} {
v, ok := quick.Value(reflect.TypeOf(typ), rand)
if !ok {
panic(fmt.Sprintf("couldn't generate random value of type %T", typ))
}
return v.Interface()
}

@ -0,0 +1,623 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"bytes"
"fmt"
"os"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/swarm/pot"
)
func init() {
h := log.LvlFilterHandler(log.LvlWarn, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))
log.Root().SetHandler(h)
}
func testKadPeerAddr(s string) *BzzAddr {
a := pot.NewAddressFromString(s)
return &BzzAddr{OAddr: a, UAddr: a}
}
type testDropPeer struct {
Peer
dropc chan error
}
type dropError struct {
error
addr string
}
func (d *testDropPeer) Drop(err error) {
err2 := &dropError{err, binStr(d)}
d.dropc <- err2
}
type testKademlia struct {
*Kademlia
Discovery bool
dropc chan error
}
func newTestKademlia(b string) *testKademlia {
params := NewKadParams()
params.MinBinSize = 1
params.MinProxBinSize = 2
base := pot.NewAddressFromString(b)
return &testKademlia{
NewKademlia(base, params),
false,
make(chan error),
}
}
func (k *testKademlia) newTestKadPeer(s string) Peer {
return &testDropPeer{&BzzPeer{BzzAddr: testKadPeerAddr(s)}, k.dropc}
}
func (k *testKademlia) On(ons ...string) *testKademlia {
for _, s := range ons {
k.Kademlia.On(k.newTestKadPeer(s).(OverlayConn))
}
return k
}
func (k *testKademlia) Off(offs ...string) *testKademlia {
for _, s := range offs {
k.Kademlia.Off(k.newTestKadPeer(s).(OverlayConn))
}
return k
}
func (k *testKademlia) Register(regs ...string) *testKademlia {
var as []OverlayAddr
for _, s := range regs {
as = append(as, testKadPeerAddr(s))
}
err := k.Kademlia.Register(as)
if err != nil {
panic(err.Error())
}
return k
}
func testSuggestPeer(t *testing.T, k *testKademlia, expAddr string, expPo int, expWant bool) error {
addr, o, want := k.SuggestPeer()
if binStr(addr) != expAddr {
return fmt.Errorf("incorrect peer address suggested. expected %v, got %v", expAddr, binStr(addr))
}
if o != expPo {
return fmt.Errorf("incorrect prox order suggested. expected %v, got %v", expPo, o)
}
if want != expWant {
return fmt.Errorf("expected SuggestPeer to want peers: %v", expWant)
}
return nil
}
func binStr(a OverlayPeer) string {
if a == nil {
return "<nil>"
}
return pot.ToBin(a.Address())[:8]
}
func TestSuggestPeerBug(t *testing.T) {
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000").On(
"10000000", "11000000",
"01000000",
"00010000", "00011000",
).Off(
"01000000",
)
err := testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
}
func TestSuggestPeerFindPeers(t *testing.T) {
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000").On("00100000")
err := testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// 2 row gap, saturated proxbin, no callables -> want PO 0
k.On("00010000")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// 1 row gap (1 less), saturated proxbin, no callables -> want PO 1
k.On("10000000")
err = testSuggestPeer(t, k, "<nil>", 1, false)
if err != nil {
t.Fatal(err.Error())
}
// no gap (1 less), saturated proxbin, no callables -> do not want more
k.On("01000000", "00100001")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// oversaturated proxbin, > do not want more
k.On("00100001")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// reintroduce gap, disconnected peer callable
// log.Info(k.String())
k.Off("01000000")
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// second time disconnected peer not callable
// with reasonably set Interval
err = testSuggestPeer(t, k, "<nil>", 1, true)
if err != nil {
t.Fatal(err.Error())
}
// on and off again, peer callable again
k.On("01000000")
k.Off("01000000")
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("01000000")
// new closer peer appears, it is immediately wanted
k.Register("00010001")
err = testSuggestPeer(t, k, "00010001", 0, false)
if err != nil {
t.Fatal(err.Error())
}
// PO1 disconnects
k.On("00010001")
log.Info(k.String())
k.Off("01000000")
log.Info(k.String())
// second time, gap filling
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("01000000")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.MinBinSize = 2
err = testSuggestPeer(t, k, "<nil>", 0, true)
if err != nil {
t.Fatal(err.Error())
}
k.Register("01000001")
err = testSuggestPeer(t, k, "01000001", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("10000001")
log.Trace(fmt.Sprintf("Kad:\n%v", k.String()))
err = testSuggestPeer(t, k, "<nil>", 1, true)
if err != nil {
t.Fatal(err.Error())
}
k.On("01000001")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.MinBinSize = 3
k.Register("10000010")
err = testSuggestPeer(t, k, "10000010", 0, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("10000010")
err = testSuggestPeer(t, k, "<nil>", 1, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("01000010")
err = testSuggestPeer(t, k, "<nil>", 2, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("00100010")
err = testSuggestPeer(t, k, "<nil>", 3, false)
if err != nil {
t.Fatal(err.Error())
}
k.On("00010010")
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
}
func TestSuggestPeerRetries(t *testing.T) {
t.Skip("Test is disabled, because it is flaky. It fails with kademlia_test.go:346: incorrect peer address suggested. expected <nil>, got 01000000")
// 2 row gap, unsaturated proxbin, no callables -> want PO 0
k := newTestKademlia("00000000")
k.RetryInterval = int64(100 * time.Millisecond) // cycle
k.MaxRetries = 50
k.RetryExponent = 2
sleep := func(n int) {
ts := k.RetryInterval
for i := 1; i < n; i++ {
ts *= int64(k.RetryExponent)
}
time.Sleep(time.Duration(ts))
}
k.Register("01000000")
k.On("00000001", "00000010")
err := testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
sleep(1)
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
sleep(1)
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
sleep(2)
err = testSuggestPeer(t, k, "01000000", 0, false)
if err != nil {
t.Fatal(err.Error())
}
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
sleep(2)
err = testSuggestPeer(t, k, "<nil>", 0, false)
if err != nil {
t.Fatal(err.Error())
}
}
func TestKademliaHiveString(t *testing.T) {
k := newTestKademlia("00000000").On("01000000", "00100000").Register("10000000", "10000001")
k.MaxProxDisplay = 8
h := k.String()
expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 000000\npopulation: 2 (4), MinProxBinSize: 2, MinBinSize: 1, MaxBinSize: 4\n000 0 | 2 8100 (0) 8000 (0)\n============ DEPTH: 1 ==========================================\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
if expH[104:] != h[104:] {
t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
}
}
// testKademliaCase constructs the kademlia and PeerPot map to validate
// the SuggestPeer and Healthy methods for provided hex-encoded addresses.
// Argument pivotAddr is the address of the kademlia.
func testKademliaCase(t *testing.T, pivotAddr string, addrs ...string) {
addr := common.FromHex(pivotAddr)
addrs = append(addrs, pivotAddr)
k := NewKademlia(addr, NewKadParams())
as := make([][]byte, len(addrs))
for i, a := range addrs {
as[i] = common.FromHex(a)
}
for _, a := range as {
if bytes.Equal(a, addr) {
continue
}
p := &BzzAddr{OAddr: a, UAddr: a}
if err := k.Register([]OverlayAddr{p}); err != nil {
t.Fatal(err)
}
}
ppmap := NewPeerPotMap(2, as)
pp := ppmap[pivotAddr]
for {
a, _, _ := k.SuggestPeer()
if a == nil {
break
}
k.On(&BzzPeer{BzzAddr: a.(*BzzAddr)})
}
h := k.Healthy(pp)
if !(h.GotNN && h.KnowNN && h.Full) {
t.Error("not healthy")
}
}
/*
The regression test for the following invalid kademlia edge case.
Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 12:18:24 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 d7e5 ec56 | 18 ec56 (0) d7e5 (0) d9e0 (0) c735 (0)
001 2 18f1 3176 | 14 18f1 (0) 10bb (0) 10d1 (0) 0421 (0)
002 2 52aa 47cd | 11 52aa (0) 51d9 (0) 5161 (0) 5130 (0)
003 1 646e | 1 646e (0)
004 0 | 3 769c (0) 76d1 (0) 7656 (0)
============ DEPTH: 5 ==========================================
005 1 7a48 | 1 7a48 (0)
006 1 7cbd | 1 7cbd (0)
007 0 | 0
008 0 | 0
009 0 | 0
010 0 | 0
011 0 | 0
012 0 | 0
013 0 | 0
014 0 | 0
015 0 | 0
=========================================================================
*/
func TestKademliaCase1(t *testing.T) {
testKademliaCase(t,
"7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e",
"ec560e6a4806aa37f147ee83687f3cf044d9953e61eedb8c34b6d50d9e2c5623",
"646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
"18f13c5fba653781019025ab10e8d2fdc916d6448729268afe9e928ffcdbb8e8",
"317617acf99b4ffddda8a736f8fc6c6ede0bf690bc23d834123823e6d03e2f69",
"d7e52d9647a5d1c27a68c3ee65d543be3947ae4b68537b236d71ef9cb15fb9ab",
"7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
"7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
"52aa3ddec61f4d48dd505a2385403c634f6ad06ee1d99c5c90a5ba6006f9af9c",
"47cdb6fa93eeb8bc91a417ff4e3b14a9c2ea85137462e2f575fae97f0c4be60d",
"5161943eb42e2a03e715fe8afa1009ff5200060c870ead6ab103f63f26cb107f",
"a38eaa1255f76bf883ca0830c86e8c4bb7eed259a8348aae9b03f21f90105bee",
"b2522bdf1ab26f324e75424fdf6e493b47e8a27687fe76347607b344fc010075",
"5bd7213964efb2580b91d02ac31ef126838abeba342f5dbdbe8d4d03562671a2",
"0b531adb82744768b694d7f94f73d4f0c9de591266108daeb8c74066bfc9c9ca",
"28501f59f70e888d399570145ed884353e017443c675aa12731ada7c87ea14f7",
"4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
"b193431ee35cd32de95805e7c1c749450c47486595aae7195ea6b6019a64fd61",
"baebf36a1e35a7ed834e1c72faf44ba16c159fa47d3289ceb3ca35fefa8739b5",
"a3659bd32e05fa36c8d20dbaaed8362bf1a8a7bd116aed62d8a43a2efbdf513f",
"10d1b50881a4770ebebdd0a75589dabb931e6716747b0f65fd6b080b88c4fdb6",
"3c76b8ca5c7ce6a03320646826213f59229626bf5b9d25da0c3ec0662dcb8ff3",
"4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
"c7353d320987956075b5bc1668571c7a36c800d5598fdc4832ec6569561e15d1",
"d9e0c7c90878c20ab7639d5954756f54775404b3483407fe1b483635182734f6",
"8fca67216b7939c0824fb06c5279901a94da41da9482b000f56df9906736ee75",
"460719d7f7aa7d7438f0eaf30333484fa3bd0f233632c10ba89e6e46dd3604be",
"0421d92c8a1c79ed5d01305a3d25aaf22a8f5f9e3d4bc80da47ee16ce20465fe",
"3441d9d9c0f05820a1bb6459fc7d8ef266a1bd929e7db939a10f544efe8261ea",
"ab198a66c293586746758468c610e5d3914d4ce629147eff6dd55a31f863ff8f",
"3a1c8c16b0763f3d2c35269f454ff779d1255e954d2deaf6c040fb3f0bcdc945",
"5561c0ea3b203e173b11e6aa9d0e621a4e10b1d8b178b8fe375220806557b823",
"7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
"5130594fd54c1652cf2debde2c4204573ed76555d1e26757fe345b409af1544a",
"76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
"89580231962624c53968c1b0095b4a2732b2a2640a19fdd7d21fd064fcc0a5ef",
"3d10d001fff44680c7417dd66ecf2e984f0baa20a9bbcea348583ba5ff210c4f",
"43754e323f0f3a1155b1852bd6edd55da86b8c4cfe3df8b33733fca50fc202b8",
"a9e7b1bb763ae6452ddcacd174993f82977d81a85206bb2ae3c842e2d8e19b4c",
"10bb07da7bc7c7757f74149eff167d528a94a253cdc694a863f4d50054c00b6d",
"28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
"835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
"8e236c56a77d7f46e41e80f7092b1a68cd8e92f6156365f41813ad1ca2c6b6f3",
"51d9c857e9238c49186e37b4eccf17a82de3d5739f026f6043798ab531456e73",
"bbddf7db6a682225301f36a9fd5b0d0121d2951753e1681295f3465352ad511f",
"2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
"769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
"ba3bebec689ce51d3e12776c45f80d25164fdfb694a8122d908081aaa2e7122c",
"3a51f4146ea90a815d0d283d1ceb20b928d8b4d45875e892696986a3c0d8fb9b",
"81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
)
}
/*
The regression test for the following invalid kademlia edge case.
Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 18:43:48 UTC 2018 KΛÐΞMLIΛ hive: queen's address: bc7f3b
population: 9 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 0f49 67ff | 28 0f49 (0) 0211 (0) 07b2 (0) 0703 (0)
001 2 e84b f3a4 | 13 f3a4 (0) e84b (0) e58b (0) e60b (0)
002 1 8dba | 1 8dba (0)
003 2 a008 ad72 | 2 ad72 (0) a008 (0)
004 0 | 3 b61f (0) b27f (0) b027 (0)
============ DEPTH: 5 ==========================================
005 1 ba19 | 1 ba19 (0)
006 0 | 0
007 1 bdd6 | 1 bdd6 (0)
008 0 | 0
009 0 | 0
010 0 | 0
011 0 | 0
012 0 | 0
013 0 | 0
014 0 | 0
015 0 | 0
=========================================================================
*/
func TestKademliaCase2(t *testing.T) {
testKademliaCase(t,
"bc7f3b6a4a7e3c91b100ca6680b6c06ff407972b88956324ca853295893e0237", "67ffb61d3aa27449d277016188f35f19e2321fbda5008c68cf6303faa080534f", "600cd54c842eadac1729c04abfc369bc244572ca76117105b9dd910283b82730", "d955a05409650de151218557425105a8aa2867bb6a0e0462fa1cf90abcf87ad6", "7a6b726de45abdf7bb3e5fd9fb0dc8932270ca4dedef92238c80c05bcdb570e3", "263e99424ebfdb652adb4e3dcd27d59e11bb7ae1c057b3ef6f390d0228006254", "ba195d1a53aafde68e661c64d39db8c2a73505bf336125c15c3560de3b48b7ed", "3458c762169937115f67cabc35a6c384ed70293a8aec37b077a6c1b8e02d510e", "4ef4dc2e28ac6efdba57e134ac24dd4e0be68b9d54f7006515eb9509105f700c", "2a8782b79b0c24b9714dfd2c8ff1932bebc08aa6520b4eaeaa59ff781238890c", "625d02e960506f4524e9cdeac85b33faf3ea437fceadbd478b62b78720cf24fc", "e051a36a8c8637f520ba259c9ed3fadaf740dadc6a04c3f0e21778ebd4cd6ac4", "e34bc014fa2504f707bb3d904872b56c2fa250bee3cb19a147a0418541f1bd90", "28036dc79add95799916893890add5d8972f3b95325a509d6ded3d448f4dc652", "1b013c407794fa2e4c955d8f51cbc6bd78588a174b6548246b291281304b5409", "34f71b68698e1534095ff23ee9c35bf64c7f12b8463e7c6f6b19c25cf03928b4", "c712c6e9bbb7076832972a95890e340b94ed735935c3c0bb788e61f011b59479", "a008d5becdcda4b9dbfdaafc3cec586cf61dcf2d4b713b6168fff02e3b9f0b08", "29de15555cdbebaab214009e416ee92f947dcec5dab9894129f50f1b17138f34", "5df9449f700bd4b5a23688b68b293f2e92fa6ca524c93bc6bb9936efba9d9ada", "3ab0168a5f87fedc6a39b53c628256ac87a98670d8691bbdaaecec22418d13a2", "1ee299b2d2a74a568494130e6869e66d57982d345c482a0e0eeb285ac219ae3b", "e0e0e3b860cea9b7a74cf1b0675cc632dc64e80a02f20bbc5e96e2e8bb670606", "dc1ba6f169b0fcdcca021dcebaf39fe5d4875e7e69b854fad65687c1d7719ec0", "d321f73e42fcfb1d3a303eddf018ca5dffdcfd5567cd5ec1212f045f6a07e47d", "070320c3da7b542e5ca8aaf6a0a53d2bb5113ed264ab1db2dceee17c729edcb1", "17d314d65fdd136b50d182d2c8f5edf16e7838c2be8cf2c00abe4b406dbcd1d8", "e60b99e0a06f7d2d99d84085f67cdf8cc22a9ae22c339365d80f90289834a2b4", "02115771e18932e1f67a45f11f5bf743c5dae97fbc477d34d35c996012420eac", "3102a40eb2e5060353dd19bf61eeec8782dd1bebfcb57f4c796912252b591827", "8dbaf231062f2dc7ddaba5f9c7761b0c21292be51bf8c2ef503f31d4a2f63f79", "b02787b713c83a9f9183216310f04251994e04c2763a9024731562e8978e7cc4", "b27fe6cd33989e10909ce794c4b0b88feae286b614a59d49a3444c1a7b51ea82", "07b2d2c94fdc6fd148fe23be2ed9eff54f5e12548f29ed8416e6860fc894466f", "e58bf9f451ef62ac44ff0a9bb0610ec0fd14d423235954f0d3695e83017cbfc4", "bdd600b91bb79d1ee0053b854de308cfaa7e2abce575ea6815a0a7b3449609c2", "0f49c93c1edc7999920b21977cedd51a763940dac32e319feb9c1df2da0f3071", "7cbf0297cd41acf655cd6f960d7aaf61479edb4189d5c001cbc730861f0deb41", "79265193778d87ad626a5f59397bc075872d7302a12634ce2451a767d0a82da2", "2fe7d705f7c370b9243dbaafe007d555ff58d218822fca49d347b12a0282457c", "e84bc0c83d05e55a0080eed41dda5a795da4b9313a4da697142e69a65834cbb3", "cc4d278bd9aa0e9fb3cd8d2e0d68fb791aab5de4b120b845c409effbed47a180", "1a2317a8646cd4b6d3c4aa4cc25f676533abb689cf180787db216880a1239ad8", "cbafd6568cf8e99076208e6b6843f5808a7087897c67aad0c54694669398f889", "7b7c8357255fc37b4dae0e1af61589035fd39ff627e0938c6b3da8b4e4ec5d23", "2b8d782c1f5bac46c922cf439f6aa79f91e9ba5ffc0020d58455188a2075b334", "b61f45af2306705740742e76197a119235584ced01ef3f7cf3d4370f6c557cd1", "2775612e7cdae2780bf494c370bdcbe69c55e4a1363b1dc79ea0135e61221cce", "f3a49bb22f40885e961299abfa697a7df690a79f067bf3a4847a3ad48d826c9f", "ad724ac218dc133c0aadf4618eae21fdd0c2f3787af279846b49e2b4f97ff167",
)
}
/*
The regression test for the following invalid kademlia edge case.
Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:04:35 UTC 2018 KΛÐΞMLIΛ hive: queen's address: b4822e
population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 786c 774b | 29 774b (0) 786c (0) 7a79 (0) 7d2f (0)
001 2 d9de cf19 | 10 cf19 (0) d9de (0) d2ff (0) d2a2 (0)
002 2 8ca1 8d74 | 5 8d74 (0) 8ca1 (0) 9793 (0) 9f51 (0)
003 0 | 0
004 0 | 3 bfac (0) bcbb (0) bde9 (0)
005 0 | 0
============ DEPTH: 6 ==========================================
006 1 b660 | 1 b660 (0)
007 0 | 0
008 1 b450 | 1 b450 (0)
009 0 | 0
010 0 | 0
011 0 | 0
012 0 | 0
013 0 | 0
014 0 | 0
015 0 | 0
=========================================================================
*/
func TestKademliaCase3(t *testing.T) {
testKademliaCase(t,
"b4822e874a01b94ac3a35c821e6db131e785c2fcbb3556e84b36102caf09b091", "2ecf54ea38d58f9cfc3862e54e5854a7c506fbc640e0b38e46d7d45a19794999", "442374092be50fc7392e8dd3f6fab3158ff7f14f26ff98060aed9b2eecf0b97d", "b450a4a67fcfa3b976cf023d8f1f15052b727f712198ce901630efe2f95db191", "9a7291638eb1c989a6dd6661a42c735b23ac6605b5d3e428aa5ffe650e892c85", "67f62eeab9804cfcac02b25ebeab9113d1b9d03dd5200b1c5a324cc0163e722f", "2e4a0e4b53bca4a9d7e2734150e9f579f29a255ade18a268461b20d026c9ee90", "30dd79c5fcdaa1b106f6960c45c9fde7c046aa3d931088d98c52ab759d0b2ac4", "97936fb5a581e59753c54fa5feec493714f2218245f61f97a62eafd4699433e4", "3a2899b6e129e3e193f6e2aefb82589c948c246d2ec1d4272af32ef3b2660f44", "f0e2a8aa88e67269e9952431ef12e5b29b7f41a1871fbfc38567fad95655d607", "7fa12b3f3c5f8383bfc644b958f72a486969733fa097d8952b3eb4f7b4f73192", "360c167aad5fc992656d6010ec45fdce5bcd492ad9608bc515e2be70d4e430c1", "fe21bc969b3d8e5a64a6484a829c1e04208f26f3cd4de6afcbc172a5bd17f1f1", "b660a1f40141d7ccd282fe5bd9838744119bd1cb3780498b5173578cc5ad308f", "44dcb3370e76680e2fba8cd986ad45ff0b77ca45680ee8d950e47922c4af6226", "8ca126923d17fccb689647307b89f38aa14e2a7b9ebcf3c1e31ccf3d2291a3bc", "f0ae19ae9ce6329327cbf42baf090e084c196b0877d8c7b69997e0123be23ef8", "d2a2a217385158e3e1e348883a14bc423e57daa12077e8c49797d16121ea0810", "f5467ccd85bb4ebe768527db520a210459969a5f1fae6e07b43f519799f0b224", "68be5fd9f9d142a5099e3609011fe3bab7bb992c595999e31e0b3d1668dfb3cf", "4d49a8a476e4934afc6b5c36db9bece3ed1804f20b952da5a21b2b0de766aa73", "ea7155745ef3fb2d099513887a2ba279333ced65c65facbd890ce58bd3fce772", "cf19f51f4e848053d289ac95a9138cdd23fc3077ae913cd58cda8cc7a521b2e1", "590b1cd41c7e6144e76b5cd515a3a4d0a4317624620a3f1685f43ae68bdcd890", "d2ffe0626b5f94a7e00fa0b506e7455a3d9399c15800db108d5e715ef5f6e346", "69630878c50a91f6c2edd23a706bfa0b50bd5661672a37d67bab38e6bca3b698", "445e9067079899bb5faafaca915ae6c0f6b1b730a5a628835dd827636f7feb1e", "6461c77491f1c4825958949f23c153e6e1759a5be53abbcee17c9da3867f3141", "23a235f4083771ccc207771daceda700b525a59ab586788d4f6892e69e34a6e2", "bde99f79ef41a81607ddcf92b9f95dcbc6c3537e91e8bf740e193dc73b19485e", "177957c0e5f0fbd12b88022a91768095d193830986caec8d888097d3ff4310b8", "bcbbdbaa4cdf8352422072f332e05111b732354a35c4d7c617ce1fc3b8b42a5a", "774b6717fdfb0d1629fb9d4c04a9ca40079ae2955d7f82e897477055ed017abb", "16443bf625be6d39ecaa6f114e5d2c1d47a64bfd3c13808d94b55b6b6acef2ee", "8d7495d9008066505ed00ce8198af82bfa5a6b4c08768b4c9fb3aa4eb0b0cca2", "15800849a53349508cb382959527f6c3cf1a46158ff1e6e2316b7dea7967e35f", "7a792f0f4a2b731781d1b244b2a57947f1a2e32900a1c0793449f9f7ae18a7b7", "5e517c2832c9deaa7df77c7bad4d20fd6eda2b7815e155e68bc48238fac1416f", "9f51a14f0019c72bd1d472706d8c80a18c1873c6a0663e754b60eae8094483d7", "7d2fabb565122521d22ba99fed9e5be6a458fbc93156d54db27d97a00b8c3a97", "786c9e412a7db4ec278891fa534caa9a1d1a028c631c6f3aeb9c4d96ad895c36", "3bd6341d40641c2632a5a0cd7a63553a04e251efd7195897a1d27e02a7a8bfde", "31efd1f5fb57b8cff0318d77a1a9e8d67e1d1c8d18ce90f99c3a240dff48cdc8", "d9de3e1156ce1380150948acbcfecd99c96e7f4b0bc97745f4681593d017f74f", "427a2201e09f9583cd990c03b81b58148c297d474a3b50f498d83b1c7a9414cd", "bfaca11596d3dec406a9fcf5d97536516dfe7f0e3b12078428a7e1700e25218a", "351c4770a097248a650008152d0cab5825d048bef770da7f3364f59d1e721bc0", "ee00f205d1486b2be7381d962bd2867263758e880529e4e2bfedfa613bbc0e71", "6aa3b6418d89e3348e4859c823ef4d6d7cd46aa7f7e77aba586c4214d760d8f8",
)
}
/*
The regression test for the following invalid kademlia edge case.
Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:16:25 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 9a90fe
population: 8 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 72ef 4e6c | 24 0b1e (0) 0d66 (0) 17f5 (0) 17e8 (0)
001 2 fc2b fa47 | 13 fa47 (0) fc2b (0) fffd (0) ecef (0)
002 2 b847 afa8 | 6 afa8 (0) ad77 (0) bb7c (0) b847 (0)
003 0 | 0
004 0 | 4 91fc (0) 957d (0) 9482 (0) 949a (0)
============ DEPTH: 5 ==========================================
005 1 9ccf | 1 9ccf (0)
006 0 | 0
007 1 9bb2 | 1 9bb2 (0)
008 0 | 0
009 0 | 0
010 0 | 0
011 0 | 0
012 0 | 0
013 0 | 0
014 0 | 0
015 0 | 0
=========================================================================
*/
func TestKademliaCase4(t *testing.T) {
testKademliaCase(t,
"9a90fe3506277244549064b8c3276abb06284a199d9063a97331947f2b7da7f4",
"c19359eddef24b7be1a833b4475f212cd944263627a53f9ef4837d106c247730", "fc2b6fef99ef947f7e57c3df376891769e2a2fd83d2b8e634e0fc1e91eaa080c", "ecefc0e1a8ea7bb4b48c469e077401fce175dd75294255b96c4e54f6a2950a55", "bb7ce598efc056bba343cc2614aa3f67a575557561290b44c73a63f8f433f9f7", "55fbee6ca52dfd7f0be0db969ee8e524b654ab4f0cce7c05d83887d7d2a15460", "afa852b6b319998c6a283cc0c82d2f5b8e9410075d7700f3012761f1cfbd0f76", "36c370cfb63f2087971ba6e58d7585b04e16b8f0da335efb91554c2dd8fe191c", "6be41e029985edebc901fb77fc4fb65516b6d85086e2a98bfa3159c99391e585", "dd3cfc72ea553e7d2b28f0037a65646b30955b929d29ba4c40f4a2a811248e77", "da3a8f18e09c7b0ca235c4e33e1441a5188f1df023138bf207753ee63e768f7d", "de9e3ab4dc572d54a2d4b878329fd832bb51a149f4ce167316eeb177b61e7e01", "4e6c1ecde6ed917706257fe020a1d02d2e9d87fca4c85f0f7b132491008c5032", "72ef04b77a070e13463b3529dd312bcacfb7a12d20dc597f5ec3de0501e9b834", "3fef57186675d524ab8bb1f54ba8cb68610babca1247c0c46dbb60aed003c69d", "1d8e6b71f7a052865d6558d4ba44ad5fab7b908cc1badf5766822e1c20d0d823", "6be2f2b4ffa173014d4ec7df157d289744a2bda54bb876b264ccfa898a0da315", "b0ba3fff8643f9985c744327b0c4c869763509fd5da2de9a80a4a0a082021255", "9ccf40b9406ba2e6567101fb9b4e5334a9ec74263eff47267da266ba45e6c158", "d7347f02c180a448e60f73931845062ce00048750b584790278e9c93ef31ad81", "b68c6359a22b3bee6fecb8804311cfd816648ea31d530c9fb48e477e029d707a", "0d668a18ad7c2820214df6df95a6c855ce19fb1cb765f8ca620e45db76686d37", "3fbd2663bff65533246f1fabb9f38086854c6218aeb3dc9ac6ac73d4f0988f91", "949aa5719ca846052bfaa1b38c97b6eca3df3e24c0e0630042c6bccafbb4cdb5", "77b8a2b917bef5d54f3792183b014cca7798f713ff14fe0b2ac79b4c9f6f996d", "17e853cbd8dc00cba3cd9ffeb36f26a9f41a0eb92f80b62c2cda16771c935388", "5f682ed7a8cf2f98387c3def7c97f9f05ae39e39d393eeca3cf621268d6347f8", "ad77487eaf11fd8084ba4517a51766eb0e5b77dd3492dfa79aa3a2802fb29d20", "d247cfcacf9a8200ebaddf639f8c926ab0a001abe682f40df3785e80ed124e91", "195589442e11907eede1ee6524157f1125f68399f3170c835ff81c603b069f6c", "5b5ca0a67f3c54e7d3a6a862ef56168ec9ed1f4945e6c24de6d336b2be2e6f8c", "56430e4caa253015f1f998dce4a48a88af1953f68e94eca14f53074ae9c3e467", "0b1eed6a5bf612d1d8e08f5c546f3d12e838568fd3aa43ed4c537f10c65545d6", "7058db19a56dfff01988ac4a62e1310597f9c8d7ebde6890dadabf047d722d39", "b847380d6888ff7cd11402d086b19eccc40950b52c9d67e73cb4f8462f5df078", "df6c048419a2290ab546d527e9eeba349e7f7e1759bafe4adac507ce60ef9670", "91fc5b4b24fc3fbfea7f9a3d0f0437cb5733c0c2345d8bdffd7048d6e3b8a37b", "957d8ea51b37523952b6f5ae95462fcd4aed1483ef32cc80b69580aaeee03606", "efa82e4e91ad9ab781977400e9ac0bb9de7389aaedebdae979b73d1d3b8d72b0", "7400c9f3f3fc0cc6fe8cc37ab24b9771f44e9f78be913f73cd35fc4be030d6bd", "9bb28f4122d61f7bb56fe27ef706159fb802fef0f5de9dfa32c9c5b3183235f1", "40a8de6e98953498b806614532ea4abf8b99ad7f9719fb68203a6eae2efa5b2a", "412de0b218b8f7dcacc9205cd16ffb4eca5b838f46a2f4f9f534026061a47308", "17f56ecad51075080680ad9faa0fd8946b824d3296ddb20be07f9809fe8d1c5a", "fffd4e7ae885a41948a342b6647955a7ec8a8039039f510cff467ef597675457", "35e78e11b5ac46a29dd04ab0043136c3291f4ca56cb949ace33111ed56395463", "94824fc80230af82077c83bfc01dc9675b1f9d3d538b1e5f41c21ac753598691", "fa470ae314ca3fce493f21b423eef2a49522e09126f6f2326fa3c9cac0b344f7", "7078860b5b621b21ac7b95f9fc4739c8235ce5066a8b9bd7d938146a34fa88ec", "eea53560f0428bfd2eca4f86a5ce9dec5ff1309129a975d73465c1c9e9da71d1",
)
}
/*
The regression test for the following invalid kademlia edge case.
Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.
=========================================================================
Mon Apr 9 19:25:18 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 5dd5c7
population: 13 (49), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000 2 e528 fad0 | 22 fad0 (0) e528 (0) e3bb (0) ed13 (0)
001 3 3f30 18e0 1dd3 | 7 3f30 (0) 23db (0) 10b6 (0) 18e0 (0)
002 4 7c54 7804 61e4 60f9 | 10 61e4 (0) 60f9 (0) 636c (0) 7186 (0)
003 2 40ae 4bae | 5 4bae (0) 4d5c (0) 403a (0) 40ae (0)
004 0 | 0
005 0 | 3 5808 (0) 5a0e (0) 5bdb (0)
============ DEPTH: 6 ==========================================
006 2 5f14 5f61 | 2 5f14 (0) 5f61 (0)
007 0 | 0
008 0 | 0
009 0 | 0
010 0 | 0
011 0 | 0
012 0 | 0
013 0 | 0
014 0 | 0
015 0 | 0
=========================================================================
*/
func TestKademliaCase5(t *testing.T) {
testKademliaCase(t,
"5dd5c77dd9006a800478fcebb02d48d4036389e7d3c8f6a83b97dbad13f4c0a9",
"78fafa0809929a1279ece089a51d12457c2d8416dff859aeb2ccc24bb50df5ec", "1dd39b1257e745f147cbbc3cadd609ccd6207c41056dbc4254bba5d2527d3ee5", "5f61dd66d4d94aec8fcc3ce0e7885c7edf30c43143fa730e2841c5d28e3cd081", "8aa8b0472cb351d967e575ad05c4b9f393e76c4b01ef4b3a54aac5283b78abc9", "4502f385152a915b438a6726ce3ea9342e7a6db91a23c2f6bee83a885ed7eb82", "718677a504249db47525e959ef1784bed167e1c46f1e0275b9c7b588e28a3758", "7c54c6ed1f8376323896ed3a4e048866410de189e9599dd89bf312ca4adb96b5", "18e03bd3378126c09e799a497150da5c24c895aedc84b6f0dbae41fc4bac081a", "23db76ac9e6e58d9f5395ca78252513a7b4118b4155f8462d3d5eec62486cadc", "40ae0e8f065e96c7adb7fa39505136401f01780481e678d718b7f6dbb2c906ec", "c1539998b8bae19d339d6bbb691f4e9daeb0e86847545229e80fe0dffe716e92", "ed139d73a2699e205574c08722ca9f030ad2d866c662f1112a276b91421c3cb9", "5bdb19584b7a36d09ca689422ef7e6bb681b8f2558a6b2177a8f7c812f631022", "636c9de7fe234ffc15d67a504c69702c719f626c17461d3f2918e924cd9d69e2", "de4455413ff9335c440d52458c6544191bd58a16d85f700c1de53b62773064ea", "de1963310849527acabc7885b6e345a56406a8f23e35e436b6d9725e69a79a83", "a80a50a467f561210a114cba6c7fb1489ed43a14d61a9edd70e2eb15c31f074d", "7804f12b8d8e6e4b375b242058242068a3809385e05df0e64973cde805cf729c", "60f9aa320c02c6f2e6370aa740cf7cea38083fa95fca8c99552cda52935c1520", "d8da963602390f6c002c00ce62a84b514edfce9ebde035b277a957264bb54d21", "8463d93256e026fe436abad44697152b9a56ac8e06a0583d318e9571b83d073c", "9a3f78fcefb9a05e40a23de55f6153d7a8b9d973ede43a380bf46bb3b3847de1", "e3bb576f4b3760b9ca6bff59326f4ebfc4a669d263fb7d67ab9797adea54ed13", "4d5cdbd6dcca5bdf819a0fe8d175dc55cc96f088d37462acd5ea14bc6296bdbe", "5a0ed28de7b5258c727cb85447071c74c00a5fbba9e6bc0393bc51944d04ab2a", "61e4ddb479c283c638f4edec24353b6cc7a3a13b930824aad016b0996ca93c47", "7e3610868acf714836cafaaa7b8c009a9ac6e3a6d443e5586cf661530a204ee2", "d74b244d4345d2c86e30a097105e4fb133d53c578320285132a952cdaa64416e", "cfeed57d0f935bfab89e3f630a7c97e0b1605f0724d85a008bbfb92cb47863a8", "580837af95055670e20d494978f60c7f1458dc4b9e389fc7aa4982b2aca3bce3", "df55c0c49e6c8a83d82dfa1c307d3bf6a20e18721c80d8ec4f1f68dc0a137ced", "5f149c51ce581ba32a285439a806c063ced01ccd4211cd024e6a615b8f216f95", "1eb76b00aeb127b10dd1b7cd4c3edeb4d812b5a658f0feb13e85c4d2b7c6fe06", "7a56ba7c3fb7cbfb5561a46a75d95d7722096b45771ec16e6fa7bbfab0b35dfe", "4bae85ad88c28470f0015246d530adc0cd1778bdd5145c3c6b538ee50c4e04bd", "afd1892e2a7145c99ec0ebe9ded0d3fec21089b277a68d47f45961ec5e39e7e0", "953138885d7b36b0ef79e46030f8e61fd7037fbe5ce9e0a94d728e8c8d7eab86", "de761613ef305e4f628cb6bf97d7b7dc69a9d513dc233630792de97bcda777a6", "3f3087280063d09504c084bbf7fdf984347a72b50d097fd5b086ffabb5b3fb4c", "7d18a94bb1ebfdef4d3e454d2db8cb772f30ca57920dd1e402184a9e598581a0", "a7d6fbdc9126d9f10d10617f49fb9f5474ffe1b229f76b7dd27cebba30eccb5d", "fad0246303618353d1387ec10c09ee991eb6180697ed3470ed9a6b377695203d", "1cf66e09ea51ee5c23df26615a9e7420be2ac8063f28f60a3bc86020e94fe6f3", "8269cdaa153da7c358b0b940791af74d7c651cd4d3f5ed13acfe6d0f2c539e7f", "90d52eaaa60e74bf1c79106113f2599471a902d7b1c39ac1f55b20604f453c09", "9788fd0c09190a3f3d0541f68073a2f44c2fcc45bb97558a7c319f36c25a75b3", "10b68fc44157ecfdae238ee6c1ce0333f906ad04d1a4cb1505c8e35c3c87fbb0", "e5284117fdf3757920475c786e0004cb00ba0932163659a89b36651a01e57394", "403ad51d911e113dcd5f9ff58c94f6d278886a2a4da64c3ceca2083282c92de3",
)
}

@ -1,308 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"fmt"
"net"
"time"
"github.com/ethereum/go-ethereum/contracts/chequebook"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/swarm/network/kademlia"
"github.com/ethereum/go-ethereum/swarm/services/swap"
"github.com/ethereum/go-ethereum/swarm/storage"
)
/*
BZZ protocol Message Types and Message Data Types
*/
// bzz protocol message codes
const (
statusMsg = iota // 0x00
storeRequestMsg // 0x01
retrieveRequestMsg // 0x02
peersMsg // 0x03
syncRequestMsg // 0x04
deliveryRequestMsg // 0x05
unsyncedKeysMsg // 0x06
paymentMsg // 0x07
)
/*
Handshake
* Version: 8 byte integer version of the protocol
* ID: arbitrary byte sequence client identifier human readable
* Addr: the address advertised by the node, format similar to DEVp2p wire protocol
* Swap: info for the swarm accounting protocol
* NetworkID: 8 byte integer network identifier
* Caps: swarm-specific capabilities, format identical to devp2p
* SyncState: synchronisation state (db iterator key and address space etc) persisted about the peer
*/
type statusMsgData struct {
Version uint64
ID string
Addr *peerAddr
Swap *swap.SwapProfile
NetworkId uint64
}
func (self *statusMsgData) String() string {
return fmt.Sprintf("Status: Version: %v, ID: %v, Addr: %v, Swap: %v, NetworkId: %v", self.Version, self.ID, self.Addr, self.Swap, self.NetworkId)
}
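// A minimal sketch of the checks a receiving node would apply to a peer's
// status message (checkStatus is a hypothetical helper, not part of the
// protocol code; the real handshake additionally negotiates Swap):
//
//	func checkStatus(self, remote *statusMsgData) error {
//		if remote.Version != self.Version {
//			return fmt.Errorf("protocol version mismatch %d != %d", remote.Version, self.Version)
//		}
//		if remote.NetworkId != self.NetworkId {
//			return fmt.Errorf("network id mismatch %d != %d", remote.NetworkId, self.NetworkId)
//		}
//		return nil
//	}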
/*
store requests are forwarded to the peers in their kademlia proximity bin
if they are distant.
if they are within our storage radius, or we have any incentive to store the
chunk, we attach our own nodeID to the metadata.
a storage request is sufficiently close if it falls within our proxLimit,
i.e., the last row of the routing table.
*/
type storeRequestMsgData struct {
Key storage.Key // hash of datasize | data
SData []byte // the actual chunk Data
// optional
Id uint64 // request ID. if delivery, the ID is retrieve request ID
requestTimeout *time.Time // expiry for forwarding - [not serialised][not currently used]
storageTimeout *time.Time // expiry of content - [not serialised][not currently used]
from *peer // [not serialised] protocol registers the requester
}
func (self storeRequestMsgData) String() string {
var from string
if self.from == nil {
from = "self"
} else {
from = self.from.Addr().String()
}
end := len(self.SData)
if len(self.SData) > 10 {
end = 10
}
return fmt.Sprintf("from: %v, Key: %v; ID: %v, requestTimeout: %v, storageTimeout: %v, SData %x", from, self.Key, self.Id, self.requestTimeout, self.storageTimeout, self.SData[:end])
}
/*
Retrieve request
Timeout in milliseconds. Note that zero timeout retrieval requests do not
request forwarding, but prompt a peers message response; they therefore also
serve as messages to retrieve peers.
MaxSize specifies the maximum size that the peer will accept. This is useful in
particular if we allow storage and delivery of multichunk payload representing
the entire or partial subtree unfolding from the requested root key.
So when we are only interested in a limited part of a stream (infinite trees)
or are only testing chunk availability, we can indicate that by limiting the
size here.
Request ID can be newly generated or kept from the request originator.
If the request ID is missing or zero, the request is handled as a lookup only,
prompting a peers response but not launching a search. Lookup requests are meant
to be used to bootstrap kademlia tables.
In the special case that the key is the zero value as well, the remote peer's
address is assumed (the message is to be handled as a self lookup request).
The response is a PeersMsg with the peers in the kademlia proximity bin
corresponding to the address.
*/
type retrieveRequestMsgData struct {
Key storage.Key // target Key address of chunk to be retrieved
Id uint64 // request id, request is a lookup if missing or zero
MaxSize uint64 // maximum size of delivery accepted
MaxPeers uint64 // maximum number of peers returned
Timeout uint64 // the longest time we are expecting a response
timeout *time.Time // [not serialised]
from *peer //
}
func (self *retrieveRequestMsgData) String() string {
var from string
if self.from == nil {
from = "ourselves"
} else {
from = self.from.Addr().String()
}
var target []byte
if len(self.Key) > 3 {
target = self.Key[:4]
}
return fmt.Sprintf("from: %v, Key: %x; ID: %v, MaxSize: %v, MaxPeers: %d", from, target, self.Id, self.MaxSize, self.MaxPeers)
}
// lookups are encoded by missing request ID
func (self *retrieveRequestMsgData) isLookup() bool {
return self.Id == 0
}
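// Illustrative construction of the two request flavours described above
// (someKey and newRequestId are placeholders, not package identifiers):
//
//	// lookup only: a zero Id prompts a peers response, no search is launched
//	lookup := &retrieveRequestMsgData{Key: someKey, Id: 0}
//	// full retrieval: a non-zero Id launches a search with forwarding
//	fetch := &retrieveRequestMsgData{Key: someKey, Id: newRequestId()}
//	lookup.isLookup() // true
//	fetch.isLookup()  // false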
// sets timeout fields
func (self *retrieveRequestMsgData) setTimeout(t *time.Time) {
self.timeout = t
if t != nil {
self.Timeout = uint64(t.UnixNano())
} else {
self.Timeout = 0
}
}
func (self *retrieveRequestMsgData) getTimeout() (t *time.Time) {
if self.Timeout > 0 && self.timeout == nil {
timeout := time.Unix(0, int64(self.Timeout)) // Timeout holds Unix nanoseconds (see setTimeout)
t = &timeout
self.timeout = t
}
return
}
// peerAddr is sent in StatusMsg as part of the handshake
type peerAddr struct {
IP net.IP
Port uint16
ID []byte // the 64 byte NodeID (ECDSA Public Key)
Addr kademlia.Address
}
// peerAddr pretty prints as enode
func (self *peerAddr) String() string {
var nodeid discover.NodeID
copy(nodeid[:], self.ID)
return discover.NewNode(nodeid, self.IP, 0, self.Port).String()
}
/*
peers msg is one response to retrieval; after a retrieval request it is
always encouraged to respond with a list of peers in the same kademlia
proximity bin.

The encoding of a peer is identical to that in the devp2p base protocol peers
messages: [IP, Port, NodeID].
Note that a node's DPA address is not the NodeID but the hash of the NodeID.

Timeout serves to indicate whether the responder is forwarding the query
within the timeout or not.

NodeID serves as the owner of payment contracts and signer of proofs of transfer.

The Key is the target (if a response to a retrieval request) or, if the
retrieval request was a self lookup, missing (zero value), in which case it
stands for the peer's own address (the hash of its NodeID).

A peers message is requested by retrieval requests with a missing or zero value request ID.
*/
type peersMsgData struct {
Peers []*peerAddr //
Timeout uint64 //
timeout *time.Time // indicate whether responder is expected to deliver content
Key storage.Key // present if a response to a retrieval request
Id uint64 // present if a response to a retrieval request
from *peer
}
// peers msg pretty printer
func (self *peersMsgData) String() string {
var from string
if self.from == nil {
from = "ourselves"
} else {
from = self.from.Addr().String()
}
var target []byte
if len(self.Key) > 3 {
target = self.Key[:4]
}
return fmt.Sprintf("from: %v, Key: %x; ID: %v, Peers: %v", from, target, self.Id, self.Peers)
}
func (self *peersMsgData) setTimeout(t *time.Time) {
self.timeout = t
if t != nil {
self.Timeout = uint64(t.UnixNano())
} else {
self.Timeout = 0
}
}
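As a hedged, same-package sketch (helper name and arguments are illustrative, not part of the commit), a peersMsg answering a retrieval request echoes the request's key and ID and carries the proximity bin:

// examplePeersResponse is a hypothetical illustration, not part of the protocol code.
func examplePeersResponse(req *retrieveRequestMsgData, bin []*peerAddr) *peersMsgData {
	return &peersMsgData{
		Peers: bin,     // peers from the kademlia proximity bin of req.Key
		Key:   req.Key, // the target, so the requester can match the response
		Id:    req.Id,  // echoed request id (zero if this answers a self lookup)
	}
}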
/*
syncRequest

is sent after the handshake to initiate syncing.
The syncState of the remote node is persisted in kaddb and set on the
peer/protocol instance when the node is registered by the hive as online.
*/
type syncRequestMsgData struct {
SyncState *syncState `rlp:"nil"`
}
func (self *syncRequestMsgData) String() string {
return fmt.Sprintf("%v", self.SyncState)
}
/*
deliveryRequest

is sent once a batch of sync keys is filtered. The ones not found are
sent as a list of syncRequest (hash, priority) in the Deliver field.
When the source receives the delivery request it continues to iterate
and fetch at most N items as yet unsynced.
At the same time it responds with deliveries of the items.
*/
type deliveryRequestMsgData struct {
Deliver []*syncRequest
}
func (self *deliveryRequestMsgData) String() string {
return fmt.Sprintf("sync request for new chunks\ndelivery request for %v chunks", len(self.Deliver))
}
/*
unsyncedKeys

is sent first after the handshake if the SyncState iterator brings up new
keys (potentially hundreds or thousands), and subsequently as a response to
deliveryRequestMsgData.

Syncing is the iterative process of exchanging unsyncedKeys and
deliveryRequestMsgs both ways.

State contains the sync state sent by the source. When the source receives
the sync state it continues to iterate and fetch at most N items as yet
unsynced. At the same time it responds with deliveries of the items.
*/
type unsyncedKeysMsgData struct {
Unsynced []*syncRequest
State *syncState
}
func (self *unsyncedKeysMsgData) String() string {
return fmt.Sprintf("sync: keys of %d new chunks (state %v) => synced: %v", len(self.Unsynced), self.State, self.State.Synced)
}
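One round of this exchange can be sketched as follows (a hypothetical same-package helper, not part of the commit; the have predicate stands in for the local db check):

// exampleSyncRound is a hypothetical illustration, not part of the protocol code.
func exampleSyncRound(offered *unsyncedKeysMsgData, have func(storage.Key) bool) *deliveryRequestMsgData {
	var need []*syncRequest
	for _, sreq := range offered.Unsynced {
		if !have(sreq.Key) { // keep only keys missing from the local store
			need = append(need, sreq)
		}
	}
	// the source answers the delivery request with storeRequestMsg deliveries
	// and a fresh unsyncedKeys batch, until State reports Synced
	return &deliveryRequestMsgData{Deliver: need}
}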
/*
payment
is sent when the swap balance is tilted in favour of the remote peer
and in absolute units exceeds the PayAt parameter in the remote peer's profile
*/
type paymentMsgData struct {
Units uint // units actually paid for (checked against amount by swap)
Promise *chequebook.Cheque // payment with cheque
}
func (self *paymentMsgData) String() string {
return fmt.Sprintf("payment for %d units: %v", self.Units, self.Promise)
}

@ -0,0 +1,111 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package priorityqueue implements a channel-based priority queue over
// arbitrary types. It provides an autopop loop applying a function to the
// items, always respecting their priority. The structure is only
// quasi-consistent, i.e., if a lower priority item is autopopped, it is
// guaranteed that there was a point when no higher priority item was present;
// it is not guaranteed that there was any point where the lower priority item
// was present but the higher one was not.
package priorityqueue
import (
"context"
"errors"
)
var (
errContention = errors.New("queue contention")
errBadPriority = errors.New("bad priority")
wakey = struct{}{}
)
// PriorityQueue is the basic structure
type PriorityQueue struct {
queues []chan interface{}
wakeup chan struct{}
}
// New is the constructor for PriorityQueue
func New(n int, l int) *PriorityQueue {
var queues = make([]chan interface{}, n)
for i := range queues {
queues[i] = make(chan interface{}, l)
}
return &PriorityQueue{
queues: queues,
wakeup: make(chan struct{}, 1),
}
}
// Run is a forever loop popping items from the queues
func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) {
top := len(pq.queues) - 1
p := top
READ:
for {
q := pq.queues[p]
select {
case <-ctx.Done():
return
case x := <-q:
f(x)
p = top
default:
if p > 0 {
p--
continue READ
}
p = top
select {
case <-ctx.Done():
return
case <-pq.wakeup:
}
}
}
}
// Push pushes an item to the appropriate queue specified in the priority argument.
// If a context is given, it waits until either the item is pushed or the context aborts;
// otherwise it returns errContention if the queue is full.
func (pq *PriorityQueue) Push(ctx context.Context, x interface{}, p int) error {
if p < 0 || p >= len(pq.queues) {
return errBadPriority
}
if ctx == nil {
select {
case pq.queues[p] <- x:
default:
return errContention
}
} else {
select {
case pq.queues[p] <- x:
case <-ctx.Done():
return ctx.Err()
}
}
select {
case pq.wakeup <- wakey:
default:
}
return nil
}
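Taken together, New, Run and Push form a tiny producer/consumer API. A minimal usage sketch (my own, assuming the import path swarm/network/priorityqueue from this commit's layout):

package main

import (
	"context"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
)

func main() {
	pq := priorityqueue.New(3, 2) // 3 priority levels, each buffered to 2 items

	var wg sync.WaitGroup
	wg.Add(2)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// the autopop loop applies the function to items,
	// draining higher priorities first
	go pq.Run(ctx, func(v interface{}) {
		fmt.Println("popped:", v)
		wg.Done()
	})

	// with a nil context Push is non-blocking and fails fast when a queue is full
	if err := pq.Push(nil, "low", 0); err != nil {
		panic(err)
	}
	if err := pq.Push(nil, "high", 2); err != nil {
		panic(err)
	}
	wg.Wait() // both items popped; "high" is served before "low" when queued together
}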

@ -0,0 +1,97 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package priorityqueue
import (
"context"
"sync"
"testing"
)
func TestPriorityQueue(t *testing.T) {
var results []string
wg := sync.WaitGroup{}
pq := New(3, 2)
wg.Add(1)
go pq.Run(context.Background(), func(v interface{}) {
results = append(results, v.(string))
wg.Done()
})
pq.Push(context.Background(), "2.0", 2)
wg.Wait()
if results[0] != "2.0" {
t.Errorf("expected first result %q, got %q", "2.0", results[0])
}
Loop:
for i, tc := range []struct {
priorities []int
values []string
results []string
errors []error
}{
{
priorities: []int{0},
values: []string{""},
results: []string{""},
},
{
priorities: []int{0, 1},
values: []string{"0.0", "1.0"},
results: []string{"1.0", "0.0"},
},
{
priorities: []int{1, 0},
values: []string{"1.0", "0.0"},
results: []string{"1.0", "0.0"},
},
{
priorities: []int{0, 1, 1},
values: []string{"0.0", "1.0", "1.1"},
results: []string{"1.0", "1.1", "0.0"},
},
{
priorities: []int{0, 0, 0},
values: []string{"0.0", "0.0", "0.1"},
errors: []error{nil, nil, errContention},
},
} {
var results []string
wg := sync.WaitGroup{}
pq := New(3, 2)
wg.Add(len(tc.values))
for j, value := range tc.values {
err := pq.Push(nil, value, tc.priorities[j])
if tc.errors != nil && err != tc.errors[j] {
t.Errorf("expected push error %v, got %v", tc.errors[j], err)
continue Loop
}
if err != nil {
continue Loop
}
}
go pq.Run(context.Background(), func(v interface{}) {
results = append(results, v.(string))
wg.Done()
})
wg.Wait()
for k, result := range tc.results {
if results[k] != result {
t.Errorf("test case %v: expected %v element %q, got %q", i, k, result, results[k])
}
}
}
}

@ -16,519 +16,414 @@

// ==== old file contents (removed) ====

package network

/*
bzz implements the swarm wire protocol [bzz] (sister of eth and shh).
The protocol instance is launched on each peer by the network layer if the
bzz protocol handler is registered on the p2p server.
The bzz protocol component speaks the bzz protocol:
* handles the protocol handshake
* registers peers in the KΛÐΞMLIΛ table via the hive logistic manager
* dispatches to the hive for handling the DHT logic
* encodes and decodes requests for storage and retrieval
* handles sync protocol messages via the syncer
* talks the SWAP payment protocol (swap accounting is done within NetStore)
*/

import (
	"errors"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/ethereum/go-ethereum/contracts/chequebook"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/p2p"
	bzzswap "github.com/ethereum/go-ethereum/swarm/services/swap"
	"github.com/ethereum/go-ethereum/swarm/services/swap/swap"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// metrics variables
var (
	storeRequestMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.storerequest.count", nil)
	retrieveRequestMsgCounter = metrics.NewRegisteredCounter("network.protocol.msg.retrieverequest.count", nil)
	peersMsgCounter           = metrics.NewRegisteredCounter("network.protocol.msg.peers.count", nil)
	syncRequestMsgCounter     = metrics.NewRegisteredCounter("network.protocol.msg.syncrequest.count", nil)
	unsyncedKeysMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.unsyncedkeys.count", nil)
	deliverRequestMsgCounter  = metrics.NewRegisteredCounter("network.protocol.msg.deliverrequest.count", nil)
	paymentMsgCounter         = metrics.NewRegisteredCounter("network.protocol.msg.payment.count", nil)
	invalidMsgCounter         = metrics.NewRegisteredCounter("network.protocol.msg.invalid.count", nil)
	handleStatusMsgCounter    = metrics.NewRegisteredCounter("network.protocol.msg.handlestatus.count", nil)
)

const (
	Version            = 0
	ProtocolLength     = uint64(8)
	ProtocolMaxMsgSize = 10 * 1024 * 1024
	NetworkId          = 3
)

// bzz represents the swarm wire protocol
// an instance is running on each peer
type bzz struct {
	storage    StorageHandler       // handler of storage/retrieval related requests coming via the bzz wire protocol
	hive       *Hive                // the logistic manager, peerPool, routing service and peer handler
	dbAccess   *DbAccess            // access to db storage counter and iterator for syncing
	requestDb  *storage.LDBDatabase // db to persist backlog of deliveries to aid syncing
	remoteAddr *peerAddr            // remote peer's address
	peer       *p2p.Peer            // the p2p peer object
	rw         p2p.MsgReadWriter    // messageReadWriter to send messages to
	backend    chequebook.Backend
	lastActive time.Time
	NetworkId  uint64

	swap        *swap.Swap          // swap instance for the peer connection
	swapParams  *bzzswap.SwapParams // swap settings both local and remote
	swapEnabled bool                // flag to enable SWAP (will be set via Caps in handshake)
	syncEnabled bool                // flag to enable SYNC (will be set via Caps in handshake)
	syncer      *syncer             // syncer instance for the peer connection
	syncParams  *SyncParams         // syncer params
	syncState   *syncState          // outgoing synchronisation state (contains reference to remote peer's db counter)
}

// StorageHandler is the interface for handlers of storage/retrieval related
// requests coming via the bzz wire protocol
// messages: UnsyncedKeys, DeliveryRequest, StoreRequest, RetrieveRequest
type StorageHandler interface {
	HandleUnsyncedKeysMsg(req *unsyncedKeysMsgData, p *peer) error
	HandleDeliveryRequestMsg(req *deliveryRequestMsgData, p *peer) error
	HandleStoreRequestMsg(req *storeRequestMsgData, p *peer)
	HandleRetrieveRequestMsg(req *retrieveRequestMsgData, p *peer)
}

/*
Bzz is the main entrypoint: a wrapper starting a server that will run the bzz
protocol. Use this constructor to attach the protocol ("class") to server caps.
This is done by node.Node#Register(func(node.ServiceContext) (Service, error)).
Service implements Protocols() which is an array of protocol constructors;
at node startup the protocols are initialised, and the devp2p layer then calls
Run(p *p2p.Peer, rw p2p.MsgReadWriter) error on each peer connection.
The Run function of the Bzz protocol class creates a bzz instance which will
represent the peer for the swarm hive and all peer-aware components.
*/
func Bzz(cloud StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64) (p2p.Protocol, error) {
	// a single global request db is created for all peer connections
	// this is to persist delivery backlog and aid synchronisation
	requestDb, err := storage.NewLDBDatabase(sy.RequestDbPath)
	if err != nil {
		return p2p.Protocol{}, fmt.Errorf("error setting up request db: %v", err)
	}
	if networkId == 0 {
		networkId = NetworkId
	}
	return p2p.Protocol{
		Name:    "bzz",
		Version: Version,
		Length:  ProtocolLength,
		Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
			return run(requestDb, cloud, backend, hive, dbaccess, sp, sy, networkId, p, rw)
		},
	}, nil
}

/*
run is the main protocol loop that
 * does the handshake by exchanging statusMsg
 * if the peer is valid and accepted, registers with the hive
 * then enters into a forever loop handling incoming messages
 * storage and retrieval related queries coming via bzz are dispatched to StorageHandler
 * peer-related messages are dispatched to the hive
 * payment related messages are relayed to the SWAP service
 * on disconnect, unregisters the peer in the hive (note RemovePeer in the post-disconnect hook)
 * whenever the loop terminates, the peer will disconnect with a Subprotocol error
 * whenever handlers return an error the loop terminates
*/
func run(requestDb *storage.LDBDatabase, depo StorageHandler, backend chequebook.Backend, hive *Hive, dbaccess *DbAccess, sp *bzzswap.SwapParams, sy *SyncParams, networkId uint64, p *p2p.Peer, rw p2p.MsgReadWriter) (err error) {
	self := &bzz{
		storage:     depo,
		backend:     backend,
		hive:        hive,
		dbAccess:    dbaccess,
		requestDb:   requestDb,
		peer:        p,
		rw:          rw,
		swapParams:  sp,
		syncParams:  sy,
		swapEnabled: hive.swapEnabled,
		syncEnabled: true,
		NetworkId:   networkId,
	}

	// handle handshake
	err = self.handleStatus()
	if err != nil {
		return err
	}
	defer func() {
		// if the handler loop exits, the peer is disconnecting
		// deregister the peer in the hive
		self.hive.removePeer(&peer{bzz: self})
		if self.syncer != nil {
			self.syncer.stop() // quits request db and delivery loops, saves requests
		}
		if self.swap != nil {
			self.swap.Stop() // quits chequebook autocash etc.
		}
	}()

	// the main forever loop that handles incoming requests
	for {
		if self.hive.blockRead {
			log.Warn(fmt.Sprintf("Cannot read network"))
			time.Sleep(100 * time.Millisecond)
			continue
		}
		err = self.handle()
		if err != nil {
			return
		}
	}
}

// TODO: may need to implement protocol drop only? don't want to kick off the peer
// if they are useful for other protocols
func (self *bzz) Drop() {
	self.peer.Disconnect(p2p.DiscSubprotocolError)
}

// handle is one cycle of the main forever loop that handles and dispatches incoming messages
func (self *bzz) handle() error {
	msg, err := self.rw.ReadMsg()
	log.Debug(fmt.Sprintf("<- %v", msg))
	if err != nil {
		return err
	}
	if msg.Size > ProtocolMaxMsgSize {
		return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize)
	}
	// make sure that the payload has been fully consumed
	defer msg.Discard()

	switch msg.Code {

	case statusMsg:
		// no extra status message allowed; the one needed was already handled by handleStatus
		log.Debug(fmt.Sprintf("Status message: %v", msg))
		return errors.New("extra status message")

	case storeRequestMsg:
		// store requests are dispatched to netStore
		storeRequestMsgCounter.Inc(1)
		var req storeRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}
		if n := len(req.SData); n < 9 {
			return fmt.Errorf("<- %v: Data too short (%v)", msg, n)
		}
		// last active time is set only when receiving chunks
		self.lastActive = time.Now()
		log.Trace(fmt.Sprintf("incoming store request: %s", req.String()))
		// swap accounting is done within forwarding
		self.storage.HandleStoreRequestMsg(&req, &peer{bzz: self})

	case retrieveRequestMsg:
		// retrieve requests are dispatched to netStore
		retrieveRequestMsgCounter.Inc(1)
		var req retrieveRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}
		req.from = &peer{bzz: self}
		// if the request is a lookup, it is not to be delivered
		if req.isLookup() {
			log.Trace(fmt.Sprintf("self lookup for %v: responding with peers only...", req.from))
		} else if req.Key == nil {
			return fmt.Errorf("protocol handler: req.Key == nil || req.Timeout == nil")
		} else {
			// swap accounting is done within netStore
			self.storage.HandleRetrieveRequestMsg(&req, &peer{bzz: self})
		}
		// direct response with peers, TODO: sort this out
		self.hive.peers(&req)

	case peersMsg:
		// response to lookups and immediate response to retrieve requests
		// dispatches new peer data to the hive that adds them to KADDB
		peersMsgCounter.Inc(1)
		var req peersMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}
		req.from = &peer{bzz: self}
		log.Trace(fmt.Sprintf("<- peer addresses: %v", req))
		self.hive.HandlePeersMsg(&req, &peer{bzz: self})

	case syncRequestMsg:
		syncRequestMsgCounter.Inc(1)
		var req syncRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}
		log.Debug(fmt.Sprintf("<- sync request: %v", req))
		self.lastActive = time.Now()
		self.sync(req.SyncState)

	case unsyncedKeysMsg:
		// coming from a parent node offering keys
		unsyncedKeysMsgCounter.Inc(1)
		var req unsyncedKeysMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}
		log.Debug(fmt.Sprintf("<- unsynced keys : %s", req.String()))
		err := self.storage.HandleUnsyncedKeysMsg(&req, &peer{bzz: self})
		self.lastActive = time.Now()
		if err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}

	case deliveryRequestMsg:
		// response to unsyncedKeysMsg: hashes filtered as not existing in the db
		// also relays the last synced state to the source
		deliverRequestMsgCounter.Inc(1)
		var req deliveryRequestMsgData
		if err := msg.Decode(&req); err != nil {
			return fmt.Errorf("<-msg %v: %v", msg, err)
		}
		log.Debug(fmt.Sprintf("<- delivery request: %s", req.String()))
		err := self.storage.HandleDeliveryRequestMsg(&req, &peer{bzz: self})
		self.lastActive = time.Now()
		if err != nil {
			return fmt.Errorf("<- %v: %v", msg, err)
		}

	case paymentMsg:
		// swap protocol message for payment, Units paid for, Cheque paid with
		paymentMsgCounter.Inc(1)
		if self.swapEnabled {
			var req paymentMsgData
			if err := msg.Decode(&req); err != nil {
				return fmt.Errorf("<- %v: %v", msg, err)
			}
			log.Debug(fmt.Sprintf("<- payment: %s", req.String()))
			self.swap.Receive(int(req.Units), req.Promise)
		}

	default:
		// no other message is allowed
		invalidMsgCounter.Inc(1)
		return fmt.Errorf("invalid message code: %v", msg.Code)
	}
	return nil
}

func (self *bzz) handleStatus() (err error) {
	handshake := &statusMsgData{
		Version:   uint64(Version),
		ID:        "honey",
		Addr:      self.selfAddr(),
		NetworkId: self.NetworkId,
		Swap: &bzzswap.SwapProfile{
			Profile:    self.swapParams.Profile,
			PayProfile: self.swapParams.PayProfile,
		},
	}

	err = p2p.Send(self.rw, statusMsg, handshake)
	if err != nil {
		return err
	}

	// read and handle remote status
	var msg p2p.Msg
	msg, err = self.rw.ReadMsg()
	if err != nil {
		return err
	}

	if msg.Code != statusMsg {
		return fmt.Errorf("first msg has code %x (!= %x)", msg.Code, statusMsg)
	}

	handleStatusMsgCounter.Inc(1)

	if msg.Size > ProtocolMaxMsgSize {
		return fmt.Errorf("message too long: %v > %v", msg.Size, ProtocolMaxMsgSize)
	}

	var status statusMsgData
	if err := msg.Decode(&status); err != nil {
		return fmt.Errorf("<- %v: %v", msg, err)
	}

	if status.NetworkId != self.NetworkId {
		return fmt.Errorf("network id mismatch: %d (!= %d)", status.NetworkId, self.NetworkId)
	}

	if Version != status.Version {
		return fmt.Errorf("protocol version mismatch: %d (!= %d)", status.Version, Version)
	}

	self.remoteAddr = self.peerAddr(status.Addr)
	log.Trace(fmt.Sprintf("self: advertised IP: %v, peer advertised: %v, local address: %v\npeer: advertised IP: %v, remote address: %v\n", self.selfAddr(), self.remoteAddr, self.peer.LocalAddr(), status.Addr.IP, self.peer.RemoteAddr()))

	if self.swapEnabled {
		// set remote profile for accounting
		self.swap, err = bzzswap.NewSwap(self.swapParams, status.Swap, self.backend, self)
		if err != nil {
			return err
		}
	}

	log.Info(fmt.Sprintf("Peer %08x is capable (%d/%d)", self.remoteAddr.Addr[:4], status.Version, status.NetworkId))
	err = self.hive.addPeer(&peer{bzz: self})
	if err != nil {
		return err
	}

	// hive sets the sync state, so sync should start after the node is added
	log.Info(fmt.Sprintf("synchronisation request sent with %v", self.syncState))
	self.syncRequest()

	return nil
}

func (self *bzz) sync(state *syncState) error {
	// syncer setup
	if self.syncer != nil {
		return errors.New("sync request can only be sent once")
	}

	cnt := self.dbAccess.counter()
	remoteaddr := self.remoteAddr.Addr
	start, stop := self.hive.kad.KeyRange(remoteaddr)

	// an explicitly received nil sync state disables synchronisation
	if state == nil {
		self.syncEnabled = false
		log.Warn(fmt.Sprintf("synchronisation disabled for peer %v", self))
		state = &syncState{DbSyncState: &storage.DbSyncState{}, Synced: true}
	} else {
		state.synced = make(chan bool)
		state.SessionAt = cnt
		if storage.IsZeroKey(state.Stop) && state.Synced {
			state.Start = storage.Key(start[:])
			state.Stop = storage.Key(stop[:])
		}
		log.Debug(fmt.Sprintf("synchronisation requested by peer %v at state %v", self, state))
	}
	var err error
	self.syncer, err = newSyncer(
		self.requestDb,
		storage.Key(remoteaddr[:]),
		self.dbAccess,
		self.unsyncedKeys, self.store,
		self.syncParams, state, func() bool { return self.syncEnabled },
	)
	if err != nil {
		return err
	}
	log.Trace(fmt.Sprintf("syncer set for peer %v", self))
	return nil
}

func (self *bzz) String() string {
	return self.remoteAddr.String()
}

// peerAddr repairs the reported address if the IP is missing
func (self *bzz) peerAddr(base *peerAddr) *peerAddr {
	if base.IP.IsUnspecified() {
		host, _, _ := net.SplitHostPort(self.peer.RemoteAddr().String())
		base.IP = net.ParseIP(host)
	}
	return base
}

// selfAddr returns the self advertised node connection info (listening address w/ enode)
// the IP will get repaired on the other end if missing
// or resolved via ID by discovery at dialout
func (self *bzz) selfAddr() *peerAddr {
	id := self.hive.id
	host, port, _ := net.SplitHostPort(self.hive.listenAddr())
	intport, _ := strconv.Atoi(port)
	addr := &peerAddr{
		Addr: self.hive.addr,
		ID:   id[:],
		IP:   net.ParseIP(host),
		Port: uint16(intport),
	}
	return addr
}

// outgoing messages
// retrieve sends a retrieveRequestMsg
func (self *bzz) retrieve(req *retrieveRequestMsgData) error {
	return self.send(retrieveRequestMsg, req)
}

// store sends a storeRequestMsg
func (self *bzz) store(req *storeRequestMsgData) error {
	return self.send(storeRequestMsg, req)
}

func (self *bzz) syncRequest() error {
	req := &syncRequestMsgData{}
	if self.hive.syncEnabled {
		log.Debug(fmt.Sprintf("synchronisation request to peer %v at state %v", self, self.syncState))
		req.SyncState = self.syncState
	}
	if self.syncState == nil {
		log.Warn(fmt.Sprintf("synchronisation disabled for peer %v at state %v", self, self.syncState))
	}
	return self.send(syncRequestMsg, req)
}

// deliveryRequest queues a storeRequestMsg in the request db
func (self *bzz) deliveryRequest(reqs []*syncRequest) error {
	req := &deliveryRequestMsgData{
		Deliver: reqs,
	}
	return self.send(deliveryRequestMsg, req)
}

// unsyncedKeys sends off a batch of syncRequests
func (self *bzz) unsyncedKeys(reqs []*syncRequest, state *syncState) error {
	req := &unsyncedKeysMsgData{
		Unsynced: reqs,
		State:    state,
	}
	return self.send(unsyncedKeysMsg, req)
}

// Pay sends a paymentMsg
func (self *bzz) Pay(units int, promise swap.Promise) {
	req := &paymentMsgData{uint(units), promise.(*chequebook.Cheque)}
	self.payment(req)
}

// payment sends a paymentMsg
func (self *bzz) payment(req *paymentMsgData) error {
	return self.send(paymentMsg, req)
}

// peers sends a peersMsg
func (self *bzz) peers(req *peersMsgData) error {
	return self.send(peersMsg, req)
}

func (self *bzz) send(msg uint64, data interface{}) error {
	if self.hive.blockWrite {
		return fmt.Errorf("network write blocked")
	}
	log.Trace(fmt.Sprintf("-> %v: %v (%T) to %v", msg, data, data, self))
	err := p2p.Send(self.rw, msg, data)
	if err != nil {
		self.Drop()
	}
	return err
}

// ==== new file contents (added) ====

package network

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/state"
)

const (
	DefaultNetworkID = 3
	// ProtocolMaxMsgSize is the maximum allowed message size
	ProtocolMaxMsgSize = 10 * 1024 * 1024
	// timeout for waiting on the handshake
	bzzHandshakeTimeout = 3000 * time.Millisecond
)

// BzzSpec is the spec of the generic swarm handshake
var BzzSpec = &protocols.Spec{
	Name:       "bzz",
	Version:    4,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		HandshakeMsg{},
	},
}

// DiscoverySpec is the spec for the bzz discovery subprotocols
var DiscoverySpec = &protocols.Spec{
	Name:       "hive",
	Version:    4,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		peersMsg{},
		subPeersMsg{},
	},
}

// Addr is the interface that peerPool needs
type Addr interface {
	OverlayPeer
	Over() []byte
	Under() []byte
	String() string
	Update(OverlayAddr) OverlayAddr
}

// Peer interface represents a live peer connection
type Peer interface {
	Addr                   // the address of a peer
	Conn                   // the live connection (protocols.Peer)
	LastActive() time.Time // last time active
}

// Conn interface represents a live peer connection
type Conn interface {
	ID() discover.NodeID                                                                  // the key that uniquely identifies the Node for the peerPool
	Handshake(context.Context, interface{}, func(interface{}) error) (interface{}, error) // perform the bzz handshake
	Send(interface{}) error                                                               // can send messages
	Drop(error)                                                                           // disconnect this peer
	Run(func(interface{}) error) error                                                    // the run function to run a protocol
	Off() OverlayAddr
}

// BzzConfig captures the config params used by the hive
type BzzConfig struct {
	OverlayAddr  []byte // base address of the overlay network
	UnderlayAddr []byte // node's underlay address
	HiveParams   *HiveParams
	NetworkID    uint64
}

// Bzz is the swarm protocol bundle
type Bzz struct {
	*Hive
	NetworkID    uint64
	localAddr    *BzzAddr
	mtx          sync.Mutex
	handshakes   map[discover.NodeID]*HandshakeMsg
	streamerSpec *protocols.Spec
	streamerRun  func(*BzzPeer) error
}

// NewBzz is the swarm protocol constructor
// arguments
// * bzz config
// * overlay driver
// * peer store
func NewBzz(config *BzzConfig, kad Overlay, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz {
	return &Bzz{
		Hive:         NewHive(config.HiveParams, kad, store),
		NetworkID:    config.NetworkID,
		localAddr:    &BzzAddr{config.OverlayAddr, config.UnderlayAddr},
		handshakes:   make(map[discover.NodeID]*HandshakeMsg),
		streamerRun:  streamerRun,
		streamerSpec: streamerSpec,
	}
}

// UpdateLocalAddr updates the underlay address of the running node
func (b *Bzz) UpdateLocalAddr(byteaddr []byte) *BzzAddr {
	b.localAddr = b.localAddr.Update(&BzzAddr{
		UAddr: byteaddr,
		OAddr: b.localAddr.OAddr,
	}).(*BzzAddr)
	return b.localAddr
}

// NodeInfo returns the node's overlay address
func (b *Bzz) NodeInfo() interface{} {
	return b.localAddr.Address()
}

// Protocols returns the protocols swarm offers
// Bzz implements the node.Service interface
// * handshake/hive
// * discovery
func (b *Bzz) Protocols() []p2p.Protocol {
	protocol := []p2p.Protocol{
		{
			Name:     BzzSpec.Name,
			Version:  BzzSpec.Version,
			Length:   BzzSpec.Length(),
			Run:      b.runBzz,
			NodeInfo: b.NodeInfo,
		},
		{
			Name:     DiscoverySpec.Name,
			Version:  DiscoverySpec.Version,
			Length:   DiscoverySpec.Length(),
			Run:      b.RunProtocol(DiscoverySpec, b.Hive.Run),
			NodeInfo: b.Hive.NodeInfo,
			PeerInfo: b.Hive.PeerInfo,
		},
	}
	if b.streamerSpec != nil && b.streamerRun != nil {
		protocol = append(protocol, p2p.Protocol{
			Name:    b.streamerSpec.Name,
			Version: b.streamerSpec.Version,
			Length:  b.streamerSpec.Length(),
			Run:     b.RunProtocol(b.streamerSpec, b.streamerRun),
		})
	}
	return protocol
}

// APIs returns the APIs offered by bzz
// * hive
// Bzz implements the node.Service interface
func (b *Bzz) APIs() []rpc.API {
	return []rpc.API{{
		Namespace: "hive",
		Version:   "3.0",
		Service:   b.Hive,
	}}
}

// RunProtocol is a wrapper for swarm subprotocols
// it returns a p2p protocol run function that can be assigned to p2p.Protocol#Run field
// arguments:
// * p2p protocol spec
// * run function taking BzzPeer as argument
//   this run function is meant to block for the duration of the protocol session
//   on return the session is terminated and the peer is disconnected
// the protocol waits until the bzz handshake is negotiated
// the overlay address on the BzzPeer is set from the remote handshake
func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(*p2p.Peer, p2p.MsgReadWriter) error {
	return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
		// wait for the bzz protocol to perform the handshake
		handshake, _ := b.GetHandshake(p.ID())
		defer b.removeHandshake(p.ID())
		select {
		case <-handshake.done:
		case <-time.After(bzzHandshakeTimeout):
			return fmt.Errorf("%08x: %s protocol timeout waiting for handshake on %08x", b.BaseAddr()[:4], spec.Name, p.ID().Bytes()[:4])
		}
		if handshake.err != nil {
			return fmt.Errorf("%08x: %s protocol closed: %v", b.BaseAddr()[:4], spec.Name, handshake.err)
		}
		// the handshake has succeeded so construct the BzzPeer and run the protocol
		peer := &BzzPeer{
			Peer:       protocols.NewPeer(p, rw, spec),
			localAddr:  b.localAddr,
			BzzAddr:    handshake.peerAddr,
			lastActive: time.Now(),
		}
		return run(peer)
	}
}

// performHandshake implements the negotiation of the bzz handshake
// shared among swarm subprotocols
func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error {
	ctx, cancel := context.WithTimeout(context.Background(), bzzHandshakeTimeout)
	defer func() {
		close(handshake.done)
		cancel()
	}()
	rsh, err := p.Handshake(ctx, handshake, b.checkHandshake)
	if err != nil {
		handshake.err = err
		return err
	}
	handshake.peerAddr = rsh.(*HandshakeMsg).Addr
	return nil
}

// runBzz is the p2p protocol run function for the bzz base protocol
// that negotiates the bzz handshake
func (b *Bzz) runBzz(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	handshake, _ := b.GetHandshake(p.ID())
	if !<-handshake.init {
		return fmt.Errorf("%08x: bzz already started on peer %08x", b.localAddr.Over()[:4], ToOverlayAddr(p.ID().Bytes())[:4])
	}
	close(handshake.init)
	defer b.removeHandshake(p.ID())
	peer := protocols.NewPeer(p, rw, BzzSpec)
	err := b.performHandshake(peer, handshake)
	if err != nil {
		log.Warn(fmt.Sprintf("%08x: handshake failed with remote peer %08x: %v", b.localAddr.Over()[:4], ToOverlayAddr(p.ID().Bytes())[:4], err))
		return err
	}
	// fail if we get another handshake
	msg, err := rw.ReadMsg()
	if err != nil {
		return err
	}
	msg.Discard()
	return errors.New("received multiple handshakes")
}

// BzzPeer is the bzz protocol view of a protocols.Peer (itself an extension of p2p.Peer)
// implements the Peer interface and all interfaces Peer implements: Addr, OverlayPeer
type BzzPeer struct {
	*protocols.Peer           // represents the connection for online peers
	localAddr       *BzzAddr  // local Peer's address
	*BzzAddr                  // remote address -> implements Addr interface = protocols.Peer
	lastActive      time.Time // time is updated whenever mutexes are releasing
}

func NewBzzTestPeer(p *protocols.Peer, addr *BzzAddr) *BzzPeer {
	return &BzzPeer{
		Peer:      p,
		localAddr: addr,
		BzzAddr:   NewAddrFromNodeID(p.ID()),
	}
}

// Off returns the overlay peer record for offline persistence
func (p *BzzPeer) Off() OverlayAddr {
	return p.BzzAddr
}

// LastActive returns the time the peer was last active
func (p *BzzPeer) LastActive() time.Time {
	return p.lastActive
}

/*
Handshake

* Version: 8 byte integer version of the protocol
* NetworkID: 8 byte integer network identifier
* Addr: the address advertised by the node including underlay and overlay connections
*/
type HandshakeMsg struct {
	Version   uint64
	NetworkID uint64
	Addr      *BzzAddr

	// peerAddr is the address received in the peer handshake
	peerAddr *BzzAddr

	init chan bool
	done chan struct{}
	err  error
}

// String pretty prints the handshake
func (bh *HandshakeMsg) String() string {
	return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v", bh.Version, bh.NetworkID, bh.Addr)
}

// checkHandshake validates the remote handshake message
func (b *Bzz) checkHandshake(hs interface{}) error {
	rhs := hs.(*HandshakeMsg)
	if rhs.NetworkID != b.NetworkID {
		return fmt.Errorf("network id mismatch %d (!= %d)", rhs.NetworkID, b.NetworkID)
	}
	if rhs.Version != uint64(BzzSpec.Version) {
		return fmt.Errorf("version mismatch %d (!= %d)", rhs.Version, BzzSpec.Version)
	}
	return nil
}

// removeHandshake removes the handshake for the peer with peerID
// from the bzz handshake store
func (b *Bzz) removeHandshake(peerID discover.NodeID) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	delete(b.handshakes, peerID)
}

// GetHandshake returns the bzz handshake that the remote peer with peerID sent
func (b *Bzz) GetHandshake(peerID discover.NodeID) (*HandshakeMsg, bool) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	handshake, found := b.handshakes[peerID]
	if !found {
		handshake = &HandshakeMsg{
			Version:   uint64(BzzSpec.Version),
			NetworkID: b.NetworkID,
			Addr:      b.localAddr,
			init:      make(chan bool, 1),
			done:      make(chan struct{}),
		}
		// when the handshake is first created for a remote peer
		// it is initialised with the init token
		handshake.init <- true
		b.handshakes[peerID] = handshake
	}

	return handshake, found
}

// BzzAddr implements the PeerAddr interface
type BzzAddr struct {
	OAddr []byte
	UAddr []byte
}

// Address implements the OverlayPeer interface to be used in Overlay
func (a *BzzAddr) Address() []byte {
	return a.OAddr
}

// Over returns the overlay address
func (a *BzzAddr) Over() []byte {
	return a.OAddr
}

// Under returns the underlay address
func (a *BzzAddr) Under() []byte {
	return a.UAddr
}

// ID returns the nodeID from the underlay enode address
func (a *BzzAddr) ID() discover.NodeID {
	return discover.MustParseNode(string(a.UAddr)).ID
}

// Update updates the underlay address of a peer record
func (a *BzzAddr) Update(na OverlayAddr) OverlayAddr {
	return &BzzAddr{a.OAddr, na.(Addr).Under()}
}

// String pretty prints the address
func (a *BzzAddr) String() string {
	return fmt.Sprintf("%x <%s>", a.OAddr, a.UAddr)
}

// RandomAddr is a utility method generating an address from a public key
func RandomAddr() *BzzAddr {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("unable to generate key")
	}
	pubkey := crypto.FromECDSAPub(&key.PublicKey)
	var id discover.NodeID
	copy(id[:], pubkey[1:])
	return NewAddrFromNodeID(id)
}

// NewNodeIDFromAddr transforms the underlay address to a discover.NodeID
func NewNodeIDFromAddr(addr Addr) discover.NodeID {
	log.Info(fmt.Sprintf("uaddr=%s", string(addr.Under())))
	node := discover.MustParseNode(string(addr.Under()))
	return node.ID
}

// NewAddrFromNodeID constructs a BzzAddr from a discover.NodeID
// the overlay address is derived as the hash of the nodeID
func NewAddrFromNodeID(id discover.NodeID) *BzzAddr {
	return &BzzAddr{
		OAddr: ToOverlayAddr(id.Bytes()),
		UAddr: []byte(discover.NewNode(id, net.IP{127, 0, 0, 1}, 30303, 30303).String()),
	}
}

// NewAddrFromNodeIDAndPort constructs a BzzAddr from a discover.NodeID, host IP and port
// the overlay address is derived as the hash of the nodeID
func NewAddrFromNodeIDAndPort(id discover.NodeID, host net.IP, port uint16) *BzzAddr {
	return &BzzAddr{
		OAddr: ToOverlayAddr(id.Bytes()),
		UAddr: []byte(discover.NewNode(id, host, port, port).String()),
	}
}

// ToOverlayAddr creates an overlay address from a byte slice
func ToOverlayAddr(id []byte) []byte {
	return crypto.Keccak256(id)
}

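Since ToOverlayAddr is just Keccak256 over the 64-byte NodeID, the overlay address derivation can be reproduced standalone; a hedged sketch (my own, not part of the diff) mirroring NewAddrFromNodeID:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/discover"
)

func main() {
	// a throwaway identity; a real node gets this from its p2p key
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	id := discover.PubkeyID(&key.PublicKey)

	// the overlay (kademlia) address is the Keccak256 hash of the 64-byte NodeID
	overlay := crypto.Keccak256(id.Bytes())
	fmt.Printf("node %x... overlay %x...\n", id[:4], overlay[:4])
}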
@ -1,4 +1,4 @@
-// Copyright 2014 The go-ethereum Authors
+// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@ -15,3 +15,226 @@
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package network
import (
"flag"
"fmt"
"os"
"sync"
"testing"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/protocols"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
)
var (
loglevel = flag.Int("loglevel", 2, "verbosity of logs")
)
func init() {
flag.Parse()
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
}
type testStore struct {
sync.Mutex
values map[string][]byte
}
func newTestStore() *testStore {
return &testStore{values: make(map[string][]byte)}
}
func (t *testStore) Load(key string) ([]byte, error) {
t.Lock()
defer t.Unlock()
v, ok := t.values[key]
if !ok {
return nil, fmt.Errorf("key not found: %s", key)
}
return v, nil
}
func (t *testStore) Save(key string, v []byte) error {
t.Lock()
defer t.Unlock()
t.values[key] = v
return nil
}
func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id discover.NodeID) []p2ptest.Exchange {
return []p2ptest.Exchange{
{
Expects: []p2ptest.Expect{
{
Code: 0,
Msg: lhs,
Peer: id,
},
},
},
{
Triggers: []p2ptest.Trigger{
{
Code: 0,
Msg: rhs,
Peer: id,
},
},
},
}
}
func newBzzBaseTester(t *testing.T, n int, addr *BzzAddr, spec *protocols.Spec, run func(*BzzPeer) error) *bzzTester {
cs := make(map[string]chan bool)
srv := func(p *BzzPeer) error {
defer func() {
if cs[p.ID().String()] != nil {
close(cs[p.ID().String()])
}
}()
return run(p)
}
protocol := func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
return srv(&BzzPeer{
Peer: protocols.NewPeer(p, rw, spec),
localAddr: addr,
BzzAddr: NewAddrFromNodeID(p.ID()),
})
}
s := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), n, protocol)
for _, id := range s.IDs {
cs[id.String()] = make(chan bool)
}
return &bzzTester{
addr: addr,
ProtocolTester: s,
cs: cs,
}
}
type bzzTester struct {
*p2ptest.ProtocolTester
addr *BzzAddr
cs map[string]chan bool
}
func newBzzHandshakeTester(t *testing.T, n int, addr *BzzAddr) *bzzTester {
config := &BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
HiveParams: NewHiveParams(),
NetworkID: DefaultNetworkID,
}
kad := NewKademlia(addr.OAddr, NewKadParams())
bzz := NewBzz(config, kad, nil, nil, nil)
s := p2ptest.NewProtocolTester(t, NewNodeIDFromAddr(addr), 1, bzz.runBzz)
return &bzzTester{
addr: addr,
ProtocolTester: s,
}
}
// TODO: should test handshakes in one exchange? parallelisation
func (s *bzzTester) testHandshake(lhs, rhs *HandshakeMsg, disconnects ...*p2ptest.Disconnect) error {
var peers []discover.NodeID
id := NewNodeIDFromAddr(rhs.Addr)
if len(disconnects) > 0 {
for _, d := range disconnects {
peers = append(peers, d.Peer)
}
} else {
peers = []discover.NodeID{id}
}
if err := s.TestExchanges(HandshakeMsgExchange(lhs, rhs, id)...); err != nil {
return err
}
if len(disconnects) > 0 {
return s.TestDisconnected(disconnects...)
}
// If we don't expect disconnect, ensure peers remain connected
err := s.TestDisconnected(&p2ptest.Disconnect{
Peer: s.IDs[0],
Error: nil,
})
if err == nil {
return fmt.Errorf("Unexpected peer disconnect")
}
if err.Error() != "timed out waiting for peers to disconnect" {
return err
}
return nil
}
func correctBzzHandshake(addr *BzzAddr) *HandshakeMsg {
return &HandshakeMsg{
Version: 4,
NetworkID: DefaultNetworkID,
Addr: addr,
}
}
func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr)
id := s.IDs[0]
err := s.testHandshake(
correctBzzHandshake(addr),
&HandshakeMsg{Version: 4, NetworkID: 321, Addr: NewAddrFromNodeID(id)},
&p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): network id mismatch 321 (!= 3)")},
)
if err != nil {
t.Fatal(err)
}
}
func TestBzzHandshakeVersionMismatch(t *testing.T) {
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr)
id := s.IDs[0]
err := s.testHandshake(
correctBzzHandshake(addr),
&HandshakeMsg{Version: 0, NetworkID: 3, Addr: NewAddrFromNodeID(id)},
&p2ptest.Disconnect{Peer: id, Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= 4)")},
)
if err != nil {
t.Fatal(err)
}
}
func TestBzzHandshakeSuccess(t *testing.T) {
addr := RandomAddr()
s := newBzzHandshakeTester(t, 1, addr)
id := s.IDs[0]
err := s.testHandshake(
correctBzzHandshake(addr),
&HandshakeMsg{Version: 4, NetworkID: 3, Addr: NewAddrFromNodeID(id)},
)
if err != nil {
t.Fatal(err)
}
}
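For orientation, the same wiring the tester uses can be sketched against a devp2p server (a hedged sketch with illustrative values, not part of the commit):

// startBzzSketch shows how the Bzz bundle could be registered on a p2p.Server;
// hypothetical illustration only.
func startBzzSketch(srv *p2p.Server) error {
	addr := RandomAddr()
	config := &BzzConfig{
		OverlayAddr:  addr.Over(),
		UnderlayAddr: addr.Under(),
		HiveParams:   NewHiveParams(),
		NetworkID:    DefaultNetworkID,
	}
	kad := NewKademlia(addr.OAddr, NewKadParams())
	bzz := NewBzz(config, kad, nil, nil, nil) // no persistence, no streamer subprotocol

	// register the bzz and hive protocols alongside whatever the server already runs
	srv.Protocols = append(srv.Protocols, bzz.Protocols()...)
	return srv.Start()
}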

@ -0,0 +1,17 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discovery

@ -0,0 +1,586 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package discovery
import (
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"strings"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/state"
colorable "github.com/mattn/go-colorable"
)
// serviceName is used with the exec adapter so the exec'd binary knows which
// service to execute
const serviceName = "discovery"
const testMinProxBinSize = 2
const discoveryPersistenceDatadir = "discovery_persistence_test_store"
var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir)
var discoveryEnabled = true
var persistenceEnabled = false
var services = adapters.Services{
serviceName: newService,
}
func cleanDbStores() error {
entries, err := ioutil.ReadDir(os.TempDir())
if err != nil {
return err
}
for _, f := range entries {
if strings.HasPrefix(f.Name(), discoveryPersistenceDatadir) {
os.RemoveAll(path.Join(os.TempDir(), f.Name()))
}
}
return nil
}
func getDbStore(nodeID string) (*state.DBStore, error) {
	if _, err := os.Stat(discoveryPersistencePath + "_" + nodeID); os.IsNotExist(err) {
		log.Info(fmt.Sprintf("directory for nodeID %s does not exist. creating...", nodeID))
		if err := os.MkdirAll(discoveryPersistencePath+"_"+nodeID, 0755); err != nil {
			return nil, err
		}
	}
log.Info(fmt.Sprintf("opening storage directory for nodeID %s", nodeID))
store, err := state.NewDBStore(discoveryPersistencePath + "_" + nodeID)
if err != nil {
return nil, err
}
return store, nil
}
var (
nodeCount = flag.Int("nodes", 10, "number of nodes to create (default 10)")
initCount = flag.Int("conns", 1, "number of originally connected peers (default 1)")
snapshotFile = flag.String("snapshot", "", "create snapshot")
loglevel = flag.Int("loglevel", 3, "verbosity of logs")
rawlog = flag.Bool("rawlog", false, "remove terminal formatting from logs")
)
func init() {
flag.Parse()
// register the discovery service which will run as a devp2p
// protocol when using the exec adapter
adapters.RegisterServices(services)
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog))))
}
// Benchmarks to test the average time it takes for an N-node ring
// to form a healthy kademlia topology
func BenchmarkDiscovery_8_1(b *testing.B) { benchmarkDiscovery(b, 8, 1) }
func BenchmarkDiscovery_16_1(b *testing.B) { benchmarkDiscovery(b, 16, 1) }
func BenchmarkDiscovery_32_1(b *testing.B) { benchmarkDiscovery(b, 32, 1) }
func BenchmarkDiscovery_64_1(b *testing.B) { benchmarkDiscovery(b, 64, 1) }
func BenchmarkDiscovery_128_1(b *testing.B) { benchmarkDiscovery(b, 128, 1) }
func BenchmarkDiscovery_256_1(b *testing.B) { benchmarkDiscovery(b, 256, 1) }
func BenchmarkDiscovery_8_2(b *testing.B) { benchmarkDiscovery(b, 8, 2) }
func BenchmarkDiscovery_16_2(b *testing.B) { benchmarkDiscovery(b, 16, 2) }
func BenchmarkDiscovery_32_2(b *testing.B) { benchmarkDiscovery(b, 32, 2) }
func BenchmarkDiscovery_64_2(b *testing.B) { benchmarkDiscovery(b, 64, 2) }
func BenchmarkDiscovery_128_2(b *testing.B) { benchmarkDiscovery(b, 128, 2) }
func BenchmarkDiscovery_256_2(b *testing.B) { benchmarkDiscovery(b, 256, 2) }
func BenchmarkDiscovery_8_4(b *testing.B) { benchmarkDiscovery(b, 8, 4) }
func BenchmarkDiscovery_16_4(b *testing.B) { benchmarkDiscovery(b, 16, 4) }
func BenchmarkDiscovery_32_4(b *testing.B) { benchmarkDiscovery(b, 32, 4) }
func BenchmarkDiscovery_64_4(b *testing.B) { benchmarkDiscovery(b, 64, 4) }
func BenchmarkDiscovery_128_4(b *testing.B) { benchmarkDiscovery(b, 128, 4) }
func BenchmarkDiscovery_256_4(b *testing.B) { benchmarkDiscovery(b, 256, 4) }
func TestDiscoverySimulationDockerAdapter(t *testing.T) {
testDiscoverySimulationDockerAdapter(t, *nodeCount, *initCount)
}
func testDiscoverySimulationDockerAdapter(t *testing.T, nodes, conns int) {
adapter, err := adapters.NewDockerAdapter()
if err != nil {
if err == adapters.ErrLinuxOnly {
t.Skip(err)
} else {
t.Fatal(err)
}
}
testDiscoverySimulation(t, nodes, conns, adapter)
}
func TestDiscoverySimulationExecAdapter(t *testing.T) {
testDiscoverySimulationExecAdapter(t, *nodeCount, *initCount)
}
func testDiscoverySimulationExecAdapter(t *testing.T, nodes, conns int) {
baseDir, err := ioutil.TempDir("", "swarm-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(baseDir)
testDiscoverySimulation(t, nodes, conns, adapters.NewExecAdapter(baseDir))
}
func TestDiscoverySimulationSimAdapter(t *testing.T) {
testDiscoverySimulationSimAdapter(t, *nodeCount, *initCount)
}
func TestDiscoveryPersistenceSimulationSimAdapter(t *testing.T) {
testDiscoveryPersistenceSimulationSimAdapter(t, *nodeCount, *initCount)
}
func testDiscoveryPersistenceSimulationSimAdapter(t *testing.T, nodes, conns int) {
testDiscoveryPersistenceSimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}
func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
testDiscoverySimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
startedAt := time.Now()
result, err := discoverySimulation(nodes, conns, adapter)
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
var min, max time.Duration
var sum int
for _, pass := range result.Passes {
duration := pass.Sub(result.StartedAt)
if sum == 0 || duration < min {
min = duration
}
if duration > max {
max = duration
}
sum += int(duration.Nanoseconds())
}
t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond)
finishedAt := time.Now()
t.Logf("Setup: %s, shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt))
}
func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) map[int][]byte {
persistenceEnabled = true
discoveryEnabled = true
result, err := discoveryPersistenceSimulation(nodes, conns, adapter)
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
// set the discovery and persistence flags again to default so other
// tests will not be affected
discoveryEnabled = true
persistenceEnabled = false
return nil
}
func benchmarkDiscovery(b *testing.B, nodes, conns int) {
for i := 0; i < b.N; i++ {
result, err := discoverySimulation(nodes, conns, adapters.NewSimAdapter(services))
if err != nil {
b.Fatalf("setting up simulation failed: %v", err)
}
if result.Error != nil {
b.Logf("simulation failed: %s", result.Error)
}
}
}
func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
// create network
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: serviceName,
})
defer net.Shutdown()
trigger := make(chan discover.NodeID)
ids := make([]discover.NodeID, nodes)
for i := 0; i < nodes; i++ {
conf := adapters.RandomNodeConfig()
node, err := net.NewNodeWithConfig(conf)
if err != nil {
return nil, fmt.Errorf("error starting node: %s", err)
}
if err := net.Start(node.ID()); err != nil {
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
ids[i] = node.ID()
}
// run a simulation which connects the nodes in a ring and waits
// for full peer discovery
var addrs [][]byte
action := func(ctx context.Context) error {
return nil
}
wg := sync.WaitGroup{}
for i := range ids {
// collect the overlay addresses, to construct the peer pot below
addrs = append(addrs, network.ToOverlayAddr(ids[i].Bytes()))
for j := 0; j < conns; j++ {
var k int
if j == 0 {
k = (i + 1) % len(ids)
} else {
k = rand.Intn(len(ids))
}
wg.Add(1)
go func(i, k int) {
defer wg.Done()
net.Connect(ids[i], ids[k])
}(i, k)
}
}
wg.Wait()
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
}
node := net.GetNode(id)
if node == nil {
return false, fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Debug(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v\n%v", id, healthy.GotNN, healthy.KnowNN, healthy.Full, healthy.Hive))
return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
}
// 64 nodes ~ 1min
// 128 nodes ~
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
})
if result.Error != nil {
return result, nil
}
if *snapshotFile != "" {
snap, err := net.Snapshot()
if err != nil {
return nil, errors.New("no shapshot dude")
}
jsonsnapshot, err := json.Marshal(snap)
if err != nil {
return nil, fmt.Errorf("corrupt json snapshot: %v", err)
}
log.Info("writing snapshot", "file", *snapshotFile)
err = ioutil.WriteFile(*snapshotFile, jsonsnapshot, 0755)
if err != nil {
return nil, err
}
}
return result, nil
}
func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
cleanDbStores()
defer cleanDbStores()
// create network
net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
ID: "0",
DefaultService: serviceName,
})
defer net.Shutdown()
trigger := make(chan discover.NodeID)
ids := make([]discover.NodeID, nodes)
var addrs [][]byte
for i := 0; i < nodes; i++ {
conf := adapters.RandomNodeConfig()
node, err := net.NewNodeWithConfig(conf)
		if err != nil {
			return nil, fmt.Errorf("error starting node: %s", err)
		}
if err := net.Start(node.ID()); err != nil {
return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
ids[i] = node.ID()
a := network.ToOverlayAddr(ids[i].Bytes())
addrs = append(addrs, a)
}
// run a simulation which connects the nodes in a ring and waits
// for full peer discovery
ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
var restartTime time.Time
action := func(ctx context.Context) error {
ticker := time.NewTicker(500 * time.Millisecond)
for range ticker.C {
isHealthy := true
for _, id := range ids {
//call Healthy RPC
node := net.GetNode(id)
if node == nil {
return fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
return fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", id.String(), healthy.GotNN && healthy.KnowNN && healthy.Full))
if !healthy.GotNN || !healthy.Full {
isHealthy = false
break
}
}
if isHealthy {
break
}
}
ticker.Stop()
log.Info("reached healthy kademlia. starting to shutdown nodes.")
shutdownStarted := time.Now()
// stop all ids, then start them again
for _, id := range ids {
node := net.GetNode(id)
if err := net.Stop(node.ID()); err != nil {
return fmt.Errorf("error stopping node %s: %s", node.ID().TerminalString(), err)
}
}
log.Info(fmt.Sprintf("shutting down nodes took: %s", time.Since(shutdownStarted)))
persistenceEnabled = true
discoveryEnabled = false
restartTime = time.Now()
for _, id := range ids {
node := net.GetNode(id)
if err := net.Start(node.ID()); err != nil {
return fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
}
if err := triggerChecks(trigger, net, node.ID()); err != nil {
return fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
}
}
log.Info(fmt.Sprintf("restarting nodes took: %s", time.Since(restartTime)))
return nil
}
	// connect the nodes in a ring, with conns additional connections each
	wg := sync.WaitGroup{}
for i := range ids {
for j := 1; j <= conns; j++ {
k := (i + j) % len(ids)
if k == i {
k = (k + 1) % len(ids)
}
wg.Add(1)
go func(i, k int) {
defer wg.Done()
net.Connect(ids[i], ids[k])
}(i, k)
}
}
wg.Wait()
log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
// construct the peer pot, so that kademlia health can be checked
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case <-ctx.Done():
return false, ctx.Err()
default:
}
node := net.GetNode(id)
if node == nil {
return false, fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return false, fmt.Errorf("error getting node client: %s", err)
}
healthy := &network.Health{}
addr := common.Bytes2Hex(network.ToOverlayAddr(id.Bytes()))
if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
return false, fmt.Errorf("error getting node health: %s", err)
}
log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v", id, healthy.GotNN, healthy.KnowNN, healthy.Full))
return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
}
// timing reference: 64 nodes ~ 1min
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
Action: action,
Trigger: trigger,
Expect: &simulations.Expectation{
Nodes: ids,
Check: check,
},
})
return result, nil
}
// triggerChecks triggers a simulation step check whenever a peer is added or
// removed from the given node, and also every second to avoid a race between
// peer events and kademlia becoming healthy
func triggerChecks(trigger chan discover.NodeID, net *simulations.Network, id discover.NodeID) error {
node := net.GetNode(id)
if node == nil {
return fmt.Errorf("unknown node: %s", id)
}
client, err := node.Client()
if err != nil {
return err
}
events := make(chan *p2p.PeerEvent)
sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
if err != nil {
return fmt.Errorf("error getting peer events for node %v: %s", id, err)
}
go func() {
defer sub.Unsubscribe()
tick := time.NewTicker(time.Second)
defer tick.Stop()
for {
select {
case <-events:
trigger <- id
case <-tick.C:
trigger <- id
case err := <-sub.Err():
if err != nil {
log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err)
}
return
}
}
}()
return nil
}
func newService(ctx *adapters.ServiceContext) (node.Service, error) {
host := adapters.ExternalIP()
addr := network.NewAddrFromNodeIDAndPort(ctx.Config.ID, host, ctx.Config.Port)
kp := network.NewKadParams()
kp.MinProxBinSize = testMinProxBinSize
if ctx.Config.Reachable != nil {
kp.Reachable = func(o network.OverlayAddr) bool {
return ctx.Config.Reachable(o.(*network.BzzAddr).ID())
}
}
kad := network.NewKademlia(addr.Over(), kp)
hp := network.NewHiveParams()
hp.KeepAliveInterval = time.Duration(200) * time.Millisecond
hp.Discovery = discoveryEnabled
log.Info(fmt.Sprintf("discovery for nodeID %s is %t", ctx.Config.ID.String(), hp.Discovery))
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
if persistenceEnabled {
log.Info(fmt.Sprintf("persistence enabled for nodeID %s", ctx.Config.ID.String()))
store, err := getDbStore(ctx.Config.ID.String())
if err != nil {
return nil, err
}
return network.NewBzz(config, kad, store, nil, nil), nil
}
return network.NewBzz(config, kad, nil, nil, nil), nil
}

File diff suppressed because one or more lines are too long

@ -0,0 +1,144 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// You can run this simulation using
//
// go run ./swarm/network/simulations/overlay.go
package main
import (
"flag"
"fmt"
"net/http"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/state"
colorable "github.com/mattn/go-colorable"
)
var (
noDiscovery = flag.Bool("no-discovery", false, "disable discovery (useful if you want to load a snapshot)")
vmodule = flag.String("vmodule", "", "log filters for logger via Vmodule")
verbosity = flag.Int("verbosity", 0, "log verbosity level")
httpSimPort = 8888
)
func init() {
flag.Parse()
//initialize the logger
//this is a demonstration of how to use Vmodule to filter logs
//provide -vmodule as a param with comma-separated values, e.g.:
//-vmodule overlay_test.go=4,simulations=3
//the example above sets overlay_test.go logs to level 4 and packages ending with "simulations" to level 3
if *vmodule != "" {
//only enable the pattern matching handler if the flag has been provided
glogger := log.NewGlogHandler(log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))
if *verbosity > 0 {
glogger.Verbosity(log.Lvl(*verbosity))
}
glogger.Vmodule(*vmodule)
log.Root().SetHandler(glogger)
}
}
type Simulation struct {
mtx sync.Mutex
stores map[discover.NodeID]*state.InmemoryStore
}
func NewSimulation() *Simulation {
return &Simulation{
stores: make(map[discover.NodeID]*state.InmemoryStore),
}
}
func (s *Simulation) NewService(ctx *adapters.ServiceContext) (node.Service, error) {
id := ctx.Config.ID
s.mtx.Lock()
store, ok := s.stores[id]
if !ok {
store = state.NewInmemoryStore()
s.stores[id] = store
}
s.mtx.Unlock()
addr := network.NewAddrFromNodeID(id)
kp := network.NewKadParams()
kp.MinProxBinSize = 2
kp.MaxBinSize = 4
kp.MinBinSize = 1
kp.MaxRetries = 1000
kp.RetryExponent = 2
kp.RetryInterval = 1000000
kad := network.NewKademlia(addr.Over(), kp)
hp := network.NewHiveParams()
hp.Discovery = !*noDiscovery
hp.KeepAliveInterval = 300 * time.Millisecond
config := &network.BzzConfig{
OverlayAddr: addr.Over(),
UnderlayAddr: addr.Under(),
HiveParams: hp,
}
return network.NewBzz(config, kad, store, nil, nil), nil
}
//create the simulation network
func newSimulationNetwork() *simulations.Network {
s := NewSimulation()
services := adapters.Services{
"overlay": s.NewService,
}
adapter := adapters.NewSimAdapter(services)
simNetwork := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
DefaultService: "overlay",
})
return simNetwork
}
//return a new http server
func newOverlaySim(sim *simulations.Network) *simulations.Server {
return simulations.NewServer(sim)
}
func main() {
//cpu optimization
runtime.GOMAXPROCS(runtime.NumCPU())
//run the sim
runOverlaySim()
}
func runOverlaySim() {
//create the simulation network
net := newSimulationNetwork()
//create a http server with it
sim := newOverlaySim(net)
log.Info(fmt.Sprintf("starting simulation server on 0.0.0.0:%d...", httpSimPort))
//start the HTTP server; ListenAndServe blocks until the server fails
if err := http.ListenAndServe(fmt.Sprintf(":%d", httpSimPort), sim); err != nil {
log.Error("simulation server stopped", "err", err)
}
}

@ -0,0 +1,195 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"time"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/swarm/log"
)
var (
nodeCount = 16
)
//This test exercises the overlay simulation.
//Because the simulation is run via a main function, breakage is easily
//missed on changes; an automated test prevents that.
//The test connects to the simulation, starts the network,
//starts the mocker, queries the number of nodes, and stops everything again.
//It also documents the steps a frontend needs to take
//to drive the simulations
func TestOverlaySim(t *testing.T) {
t.Skip("Test is flaky, see: https://github.com/ethersphere/go-ethereum/issues/592")
//start the simulation
log.Info("Start simulation backend")
//get the simulation network; needed to subscribe to up events
net := newSimulationNetwork()
//create the overlay simulation
sim := newOverlaySim(net)
//create a http test server with it
srv := httptest.NewServer(sim)
defer srv.Close()
log.Debug("Http simulation server started. Start simulation network")
//start the simulation network (initialization of simulation)
resp, err := http.Post(srv.URL+"/start", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Expected Status Code %d, got %d", http.StatusOK, resp.StatusCode)
}
log.Debug("Start mocker")
//start the mocker, needs a node count and an ID
resp, err = http.PostForm(srv.URL+"/mocker/start",
url.Values{
"node-count": {fmt.Sprintf("%d", nodeCount)},
"mocker-type": {simulations.GetMockerList()[0]},
})
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
reason, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
t.Fatalf("Expected Status Code %d, got %d, response body %s", http.StatusOK, resp.StatusCode, string(reason))
}
//variables needed to wait for nodes to come up
var upCount int
trigger := make(chan discover.NodeID)
//wait for all nodes to be up
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
//start watching node up events...
go watchSimEvents(net, ctx, trigger)
//...and wait until all expected up events (nodeCount) have been received
LOOP:
for {
select {
case <-trigger:
//new node up event received, increase counter
upCount++
//all expected node up events received
if upCount == nodeCount {
break LOOP
}
case <-ctx.Done():
t.Fatalf("Timed out waiting for up events")
}
}
//at this point we can query the server
log.Info("Get number of nodes")
//get the number of nodes
resp, err = http.Get(srv.URL + "/nodes")
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
//unmarshal number of nodes from JSON response
var nodesArr []simulations.Node
err = json.Unmarshal(b, &nodesArr)
if err != nil {
t.Fatal(err)
}
//check if number of nodes received is same as sent
if len(nodesArr) != nodeCount {
t.Fatal(fmt.Errorf("Expected %d number of nodes, got %d", nodeCount, len(nodesArr)))
}
//let it run for a little while; stopping it immediately can crash because running nodes
//may still be trying to connect to already stopped nodes
time.Sleep(1 * time.Second)
log.Info("Stop the network")
//stop the network
resp, err = http.Post(srv.URL+"/stop", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
log.Info("Reset the network")
//reset the network (removes all nodes and connections)
resp, err = http.Post(srv.URL+"/reset", "application/json", nil)
if err != nil {
t.Fatal(err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
t.Fatalf("err %s", resp.Status)
}
}
//watch for events so we know when all nodes are up
func watchSimEvents(net *simulations.Network, ctx context.Context, trigger chan discover.NodeID) {
events := make(chan *simulations.Event)
sub := net.Events().Subscribe(events)
defer sub.Unsubscribe()
for {
select {
case ev := <-events:
//only catch node up events
if ev.Type == simulations.EventTypeNode {
if ev.Node.Up {
log.Debug("got node up event", "event", ev, "node", ev.Node.Config.ID)
select {
case trigger <- ev.Node.Config.ID:
case <-ctx.Done():
return
}
}
}
case <-ctx.Done():
return
}
}
}

@ -0,0 +1,449 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream
import (
"context"
"encoding/binary"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"sync/atomic"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/mock"
"github.com/ethereum/go-ethereum/swarm/storage/mock/db"
colorable "github.com/mattn/go-colorable"
)
var (
deliveries map[discover.NodeID]*Delivery
stores map[discover.NodeID]storage.ChunkStore
toAddr func(discover.NodeID) *network.BzzAddr
peerCount func(discover.NodeID) int
adapter = flag.String("adapter", "sim", "type of simulation: sim|exec|docker")
loglevel = flag.Int("loglevel", 2, "verbosity of logs")
nodes = flag.Int("nodes", 0, "number of nodes")
chunks = flag.Int("chunks", 0, "number of chunks")
useMockStore = flag.Bool("mockstore", false, "use a mock global store (disabled by default)")
)
var (
defaultSkipCheck bool
waitPeerErrC chan error
chunkSize = 4096
registries map[discover.NodeID]*TestRegistry
createStoreFunc func(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error)
getRetrieveFunc = defaultRetrieveFunc
subscriptionCount = 0
globalStore mock.GlobalStorer
globalStoreDir string
)
var services = adapters.Services{
"streamer": NewStreamerService,
"intervalsStreamer": newIntervalsStreamerService,
}
func init() {
flag.Parse()
// register the Delivery service which will run as a devp2p
// protocol when using the exec adapter
adapters.RegisterServices(services)
log.PrintOrigins(true)
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}
func createGlobalStore() {
var err error
globalStoreDir, err = ioutil.TempDir("", "global.store")
if err != nil {
log.Error("Error initiating global store temp directory!", "err", err)
return
}
globalStore, err = db.NewGlobalStore(globalStoreDir)
if err != nil {
log.Error("Error initiating global store!", "err", err)
}
}
// NewStreamerService creates a stream registry service for a simulation node,
// wired with delivery, syncing and a file store
func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
var err error
id := ctx.Config.ID
addr := toAddr(id)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
stores[id], err = createStoreFunc(id, addr)
if err != nil {
return nil, err
}
store := stores[id].(*storage.LocalStore)
db := storage.NewDBAPI(store)
delivery := NewDelivery(kad, db)
deliveries[id] = delivery
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: defaultSkipCheck,
DoRetrieve: false,
})
RegisterSwarmSyncerServer(r, db)
RegisterSwarmSyncerClient(r, db)
go func() {
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
}()
fileStore := storage.NewFileStore(storage.NewNetStore(store, getRetrieveFunc(id)), storage.NewFileStoreParams())
testRegistry := &TestRegistry{Registry: r, fileStore: fileStore}
registries[id] = testRegistry
return testRegistry, nil
}
func defaultRetrieveFunc(id discover.NodeID) func(chunk *storage.Chunk) error {
return nil
}
func datadirsCleanup() {
for _, id := range ids {
os.RemoveAll(datadirs[id])
}
if globalStoreDir != "" {
os.RemoveAll(globalStoreDir)
}
}
//local stores need to be cleaned up after the sim is done
func localStoreCleanup() {
log.Info("Cleaning up...")
for _, id := range ids {
registries[id].Close()
stores[id].Close()
}
log.Info("Local store cleanup done")
}
func newStreamerTester(t *testing.T) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
// setup
addr := network.RandomAddr() // the tested peer's address
to := network.NewKademlia(addr.OAddr, network.NewKadParams())
// temp datadir
datadir, err := ioutil.TempDir("", "streamer")
if err != nil {
return nil, nil, nil, func() {}, err
}
removeDataDir := func() {
os.RemoveAll(datadir)
}
params := storage.NewDefaultLocalStoreParams()
params.Init(datadir)
params.BaseKey = addr.Over()
localStore, err := storage.NewTestLocalStoreForAddr(params)
if err != nil {
return nil, nil, nil, removeDataDir, err
}
db := storage.NewDBAPI(localStore)
delivery := NewDelivery(to, db)
streamer := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: defaultSkipCheck,
})
teardown := func() {
streamer.Close()
removeDataDir()
}
protocolTester := p2ptest.NewProtocolTester(t, network.NewNodeIDFromAddr(addr), 1, streamer.runProtocol)
err = waitForPeers(streamer, 1*time.Second, 1)
if err != nil {
return nil, nil, nil, nil, errors.New("timeout: peer is not created")
}
return protocolTester, streamer, localStore, teardown, nil
}
func waitForPeers(streamer *Registry, timeout time.Duration, expectedPeers int) error {
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
timeoutTimer := time.NewTimer(timeout)
defer timeoutTimer.Stop()
for {
select {
case <-ticker.C:
if streamer.peersCount() >= expectedPeers {
return nil
}
case <-timeoutTimer.C:
return errors.New("timeout")
}
}
}
type roundRobinStore struct {
index uint32
stores []storage.ChunkStore
}
func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
return &roundRobinStore{
stores: stores,
}
}
func (rrs *roundRobinStore) Get(addr storage.Address) (*storage.Chunk, error) {
return nil, errors.New("get not well defined on round robin store")
}
func (rrs *roundRobinStore) Put(chunk *storage.Chunk) {
i := atomic.AddUint32(&rrs.index, 1)
idx := int(i) % len(rrs.stores)
rrs.stores[idx].Put(chunk)
}
func (rrs *roundRobinStore) Close() {
for _, store := range rrs.stores {
store.Close()
}
}
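// roundRobinStore above distributes writes by atomically incrementing an
// index. A minimal standalone sketch of the same dispatch pattern, with
// hypothetical names (sink, roundRobin) that are not part of this package:
package main

import (
    "fmt"
    "sync/atomic"
)

type sink func(v int)

type roundRobin struct {
    index uint32
    sinks []sink
}

// put forwards v to the next sink in rotation; AddUint32 makes the
// index increment safe under concurrent callers.
func (r *roundRobin) put(v int) {
    i := atomic.AddUint32(&r.index, 1)
    r.sinks[int(i)%len(r.sinks)](v)
}

func main() {
    r := &roundRobin{sinks: []sink{
        func(v int) { fmt.Println("store A got", v) },
        func(v int) { fmt.Println("store B got", v) },
    }}
    for v := 1; v <= 4; v++ {
        r.put(v)
    }
    // writes alternate B, A, B, A since the counter starts at 1
}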
type TestRegistry struct {
*Registry
fileStore *storage.FileStore
}
func (r *TestRegistry) APIs() []rpc.API {
a := r.Registry.APIs()
a = append(a, rpc.API{
Namespace: "stream",
Version: "3.0",
Service: r,
Public: true,
})
return a
}
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
r, _ := fileStore.Retrieve(hash)
buf := make([]byte, 1024)
var n int
var total int64
var err error
// read in 1KiB steps via ReadAt until EOF; the total == 0 guard
// guarantees at least one read attempt
for (total == 0 || n > 0) && err == nil {
n, err = r.ReadAt(buf, total)
total += int64(n)
}
if err != nil && err != io.EOF {
return total, err
}
return total, nil
}
func (r *TestRegistry) ReadAll(hash common.Hash) (int64, error) {
return readAll(r.fileStore, hash[:])
}
func (r *TestRegistry) Start(server *p2p.Server) error {
return r.Registry.Start(server)
}
func (r *TestRegistry) Stop() error {
return r.Registry.Stop()
}
type TestExternalRegistry struct {
*Registry
}
func (r *TestExternalRegistry) APIs() []rpc.API {
a := r.Registry.APIs()
a = append(a, rpc.API{
Namespace: "stream",
Version: "3.0",
Service: r,
Public: true,
})
return a
}
func (r *TestExternalRegistry) GetHashes(ctx context.Context, peerId discover.NodeID, s Stream) (*rpc.Subscription, error) {
peer := r.getPeer(peerId)
client, err := peer.getClient(ctx, s)
if err != nil {
return nil, err
}
c := client.Client.(*testExternalClient)
notifier, supported := rpc.NotifierFromContext(ctx)
if !supported {
return nil, fmt.Errorf("Subscribe not supported")
}
sub := notifier.CreateSubscription()
go func() {
// if we begin sending events immediately, some events
// will probably be dropped since the subscription ID might not yet have been sent to
// the client.
// ref: rpc/subscription_test.go#L65
time.Sleep(1 * time.Second)
for {
select {
case h := <-c.hashes:
<-c.enableNotificationsC // wait for notification subscription to complete
if err := notifier.Notify(sub.ID, h); err != nil {
log.Warn(fmt.Sprintf("rpc sub notifier notify stream %s: %v", s, err))
}
case err := <-sub.Err():
if err != nil {
log.Warn(fmt.Sprintf("caught subscription error in stream %s: %v", s, err))
}
case <-notifier.Closed():
log.Trace("rpc sub notifier closed")
return
}
}
}()
return sub, nil
}
func (r *TestExternalRegistry) EnableNotifications(peerId discover.NodeID, s Stream) error {
peer := r.getPeer(peerId)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
client, err := peer.getClient(ctx, s)
if err != nil {
return err
}
close(client.Client.(*testExternalClient).enableNotificationsC)
return nil
}
// TODO: merge functionalities of testExternalClient and testExternalServer
// with testClient and testServer.
type testExternalClient struct {
hashes chan []byte
db *storage.DBAPI
enableNotificationsC chan struct{}
}
func newTestExternalClient(db *storage.DBAPI) *testExternalClient {
return &testExternalClient{
hashes: make(chan []byte),
db: db,
enableNotificationsC: make(chan struct{}),
}
}
func (c *testExternalClient) NeedData(hash []byte) func() {
chunk, _ := c.db.GetOrCreateRequest(hash)
if chunk.ReqC == nil {
return nil
}
c.hashes <- hash
return func() {
chunk.WaitToStore()
}
}
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
return nil
}
func (c *testExternalClient) Close() {}
const testExternalServerBatchSize = 10
type testExternalServer struct {
t string
keyFunc func(key []byte, index uint64)
sessionAt uint64
maxKeys uint64
streamer *TestExternalRegistry
}
func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
if keyFunc == nil {
keyFunc = binary.BigEndian.PutUint64
}
return &testExternalServer{
t: t,
keyFunc: keyFunc,
sessionAt: sessionAt,
maxKeys: maxKeys,
}
}
func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
if from == 0 && to == 0 {
from = s.sessionAt
to = s.sessionAt + testExternalServerBatchSize
}
if to-from > testExternalServerBatchSize {
to = from + testExternalServerBatchSize - 1
}
if from >= s.maxKeys && to > s.maxKeys {
return nil, 0, 0, nil, io.EOF
}
if to > s.maxKeys {
to = s.maxKeys
}
b := make([]byte, HashSize*(to-from+1))
for i := from; i <= to; i++ {
s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
}
return b, from, to, nil, nil
}
func (s *testExternalServer) GetData([]byte) ([]byte, error) {
return make([]byte, 4096), nil
}
func (s *testExternalServer) Close() {}
// setDefaultSkipCheck sets the global value defaultSkipCheck.
// It should be used in a test function's defer to reset the global
// to its original value:
//
// defer setDefaultSkipCheck(defaultSkipCheck)
// defaultSkipCheck = skipCheck
//
// This works because a deferred call's arguments are evaluated immediately,
// while only the invocation of the function body is deferred.
func setDefaultSkipCheck(skipCheck bool) {
defaultSkipCheck = skipCheck
}
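// A minimal standalone sketch of the defer semantics the comment above
// relies on; flagValue, setFlag and withFlag are hypothetical names, not
// part of this package. The deferred call's argument captures the old value
// immediately, and the restore runs only when the function returns.
package main

import "fmt"

var flagValue = false

func setFlag(v bool) { flagValue = v }

func withFlag(v bool) {
    // flagValue is evaluated here, so the old value (false) is captured now
    defer setFlag(flagValue)
    flagValue = v
    fmt.Println("inside:", flagValue) // inside: true
}

func main() {
    withFlag(true)
    fmt.Println("after:", flagValue) // after: false
}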

@ -0,0 +1,272 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream
import (
"errors"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
"github.com/ethereum/go-ethereum/swarm/storage"
)
const (
swarmChunkServerStreamName = "RETRIEVE_REQUEST"
deliveryCap = 32
)
var (
processReceivedChunksCount = metrics.NewRegisteredCounter("network.stream.received_chunks.count", nil)
handleRetrieveRequestMsgCount = metrics.NewRegisteredCounter("network.stream.handle_retrieve_request_msg.count", nil)
requestFromPeersCount = metrics.NewRegisteredCounter("network.stream.request_from_peers.count", nil)
requestFromPeersEachCount = metrics.NewRegisteredCounter("network.stream.request_from_peers_each.count", nil)
)
type Delivery struct {
db *storage.DBAPI
overlay network.Overlay
receiveC chan *ChunkDeliveryMsg
getPeer func(discover.NodeID) *Peer
}
func NewDelivery(overlay network.Overlay, db *storage.DBAPI) *Delivery {
d := &Delivery{
db: db,
overlay: overlay,
receiveC: make(chan *ChunkDeliveryMsg, deliveryCap),
}
go d.processReceivedChunks()
return d
}
// SwarmChunkServer implements Server
type SwarmChunkServer struct {
deliveryC chan []byte
batchC chan []byte
db *storage.DBAPI
currentLen uint64
quit chan struct{}
}
// NewSwarmChunkServer is SwarmChunkServer constructor
func NewSwarmChunkServer(db *storage.DBAPI) *SwarmChunkServer {
s := &SwarmChunkServer{
deliveryC: make(chan []byte, deliveryCap),
batchC: make(chan []byte),
db: db,
quit: make(chan struct{}),
}
go s.processDeliveries()
return s
}
// processDeliveries handles delivered chunk hashes
func (s *SwarmChunkServer) processDeliveries() {
var hashes []byte
var batchC chan []byte
for {
select {
case <-s.quit:
return
case hash := <-s.deliveryC:
hashes = append(hashes, hash...)
batchC = s.batchC
case batchC <- hashes:
hashes = nil
batchC = nil
}
}
}
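// The select above uses a common Go batching idiom: a send on a nil channel
// blocks forever, so batchC stays nil (send case disabled) until hashes
// arrive, and is reset to nil once a batch is handed off. A standalone
// sketch of the same pattern, with hypothetical names:
package main

import "fmt"

func main() {
    in := make(chan int)
    outC := make(chan []int)
    go func() {
        var batch []int
        var out chan []int // nil: the send case below is disabled
        for {
            select {
            case v := <-in:
                batch = append(batch, v)
                out = outC // enable the send case
            case out <- batch:
                batch = nil
                out = nil // disable until the next value arrives
            }
        }
    }()
    in <- 1
    in <- 2
    fmt.Println(<-outC) // [1 2]
}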
// SetNextBatch waits for the next batch of delivered hashes and returns it
// together with the updated range
func (s *SwarmChunkServer) SetNextBatch(_, _ uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) {
select {
case hashes = <-s.batchC:
case <-s.quit:
return
}
from = s.currentLen
s.currentLen += uint64(len(hashes))
to = s.currentLen
return
}
// Close needs to be called on a stream server
func (s *SwarmChunkServer) Close() {
close(s.quit)
}
// GetData retrieves chunk data from the db store
func (s *SwarmChunkServer) GetData(key []byte) ([]byte, error) {
chunk, err := s.db.Get(storage.Address(key))
if err == storage.ErrFetching {
<-chunk.ReqC
} else if err != nil {
return nil, err
}
return chunk.SData, nil
}
// RetrieveRequestMsg is the protocol msg for chunk retrieve requests
type RetrieveRequestMsg struct {
Addr storage.Address
SkipCheck bool
}
func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) error {
log.Trace("received request", "peer", sp.ID(), "hash", req.Addr)
handleRetrieveRequestMsgCount.Inc(1)
s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false))
if err != nil {
return err
}
streamer := s.Server.(*SwarmChunkServer)
chunk, created := d.db.GetOrCreateRequest(req.Addr)
if chunk.ReqC != nil {
if created {
if err := d.RequestFromPeers(chunk.Addr[:], true, sp.ID()); err != nil {
log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err)
chunk.SetErrored(storage.ErrChunkForward)
return nil
}
}
go func() {
t := time.NewTimer(10 * time.Minute)
defer t.Stop()
log.Debug("waiting delivery", "peer", sp.ID(), "hash", req.Addr, "node", common.Bytes2Hex(d.overlay.BaseAddr()), "created", created)
start := time.Now()
select {
case <-chunk.ReqC:
log.Debug("retrieve request ReqC closed", "peer", sp.ID(), "hash", req.Addr, "time", time.Since(start))
case <-t.C:
log.Debug("retrieve request timeout", "peer", sp.ID(), "hash", req.Addr)
chunk.SetErrored(storage.ErrChunkTimeout)
return
}
chunk.SetErrored(nil)
if req.SkipCheck {
err := sp.Deliver(chunk, s.priority)
if err != nil {
log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err)
sp.Drop(err)
}
}
streamer.deliveryC <- chunk.Addr[:]
}()
return nil
}
// TODO: call the retrieve function of the outgoing syncer
if req.SkipCheck {
log.Trace("deliver", "peer", sp.ID(), "hash", chunk.Addr)
if length := len(chunk.SData); length < 9 {
log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr)
}
return sp.Deliver(chunk, s.priority)
}
streamer.deliveryC <- chunk.Addr[:]
return nil
}
type ChunkDeliveryMsg struct {
Addr storage.Address
SData []byte // the stored chunk Data (incl size)
peer *Peer // set in handleChunkDeliveryMsg
}
func (d *Delivery) handleChunkDeliveryMsg(sp *Peer, req *ChunkDeliveryMsg) error {
req.peer = sp
d.receiveC <- req
return nil
}
func (d *Delivery) processReceivedChunks() {
R:
for req := range d.receiveC {
processReceivedChunksCount.Inc(1)
// if Get succeeds, the chunk is already available locally and can be skipped
chunk, err := d.db.Get(req.Addr)
if err == nil {
continue R
}
if err != storage.ErrFetching {
log.Error("processReceivedChunks db error", "addr", req.Addr, "err", err, "chunk", chunk)
continue R
}
select {
case <-chunk.ReqC:
log.Error("someone else delivered?", "hash", chunk.Addr.Hex())
continue R
default:
}
chunk.SData = req.SData
d.db.Put(chunk)
go func(req *ChunkDeliveryMsg) {
err := chunk.WaitToStore()
if err == storage.ErrChunkInvalid {
req.peer.Drop(err)
}
}(req)
}
}
// RequestFromPeers sends a chunk retrieve request to the first suitable
// connected peer, skipping the peers given in peersToSkip
func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error {
var success bool
var err error
requestFromPeersCount.Inc(1)
d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool {
spId := p.(network.Peer).ID()
for _, p := range peersToSkip {
if p == spId {
log.Trace("Delivery.RequestFromPeers: skip peer", "peer", spId)
return true
}
}
sp := d.getPeer(spId)
if sp == nil {
log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId)
return true
}
// TODO: skip light nodes that do not accept retrieve requests
err = sp.SendPriority(&RetrieveRequestMsg{
Addr: hash,
SkipCheck: skipCheck,
}, Top)
if err != nil {
return true
}
requestFromPeersEachCount.Inc(1)
success = true
return false
})
if success {
return nil
}
return errors.New("no peer found")
}

@ -0,0 +1,699 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream
import (
"bytes"
"context"
crand "crypto/rand"
"fmt"
"io"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
"github.com/ethereum/go-ethereum/swarm/storage"
)
func TestStreamerRetrieveRequest(t *testing.T) {
tester, streamer, _, teardown, err := newStreamerTester(t)
defer teardown()
if err != nil {
t.Fatal(err)
}
peerID := tester.IDs[0]
streamer.delivery.RequestFromPeers(hash0[:], true)
err = tester.TestExchanges(p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Expects: []p2ptest.Expect{
{
Code: 5,
Msg: &RetrieveRequestMsg{
Addr: hash0[:],
SkipCheck: true,
},
Peer: peerID,
},
},
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
}
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
tester, streamer, _, teardown, err := newStreamerTester(t)
defer teardown()
if err != nil {
t.Fatal(err)
}
peerID := tester.IDs[0]
chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
peer := streamer.getPeer(peerID)
peer.handleSubscribeMsg(&SubscribeMsg{
Stream: NewStream(swarmChunkServerStreamName, "", false),
History: nil,
Priority: Top,
})
err = tester.TestExchanges(p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Triggers: []p2ptest.Trigger{
{
Code: 5,
Msg: &RetrieveRequestMsg{
Addr: chunk.Addr[:],
},
Peer: peerID,
},
},
Expects: []p2ptest.Expect{
{
Code: 1,
Msg: &OfferedHashesMsg{
HandoverProof: nil,
Hashes: nil,
From: 0,
To: 0,
},
Peer: peerID,
},
},
})
expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
if err == nil || err.Error() != expectedError {
t.Fatalf("Expected error %v, got %v", expectedError, err)
}
}
// upstream request server receives a retrieve request and responds with
// offered hashes, or with direct delivery if SkipCheck is set to true
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
tester, streamer, localStore, teardown, err := newStreamerTester(t)
defer teardown()
if err != nil {
t.Fatal(err)
}
peerID := tester.IDs[0]
peer := streamer.getPeer(peerID)
stream := NewStream(swarmChunkServerStreamName, "", false)
peer.handleSubscribeMsg(&SubscribeMsg{
Stream: stream,
History: nil,
Priority: Top,
})
hash := storage.Address(hash0[:])
chunk := storage.NewChunk(hash, nil)
chunk.SData = hash
localStore.Put(chunk)
chunk.WaitToStore()
err = tester.TestExchanges(p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Triggers: []p2ptest.Trigger{
{
Code: 5,
Msg: &RetrieveRequestMsg{
Addr: hash,
},
Peer: peerID,
},
},
Expects: []p2ptest.Expect{
{
Code: 1,
Msg: &OfferedHashesMsg{
HandoverProof: &HandoverProof{
Handover: &Handover{},
},
Hashes: hash,
From: 0,
// TODO: why is this 32???
To: 32,
Stream: stream,
},
Peer: peerID,
},
},
})
if err != nil {
t.Fatal(err)
}
hash = storage.Address(hash1[:])
chunk = storage.NewChunk(hash, nil)
chunk.SData = hash1[:]
localStore.Put(chunk)
chunk.WaitToStore()
err = tester.TestExchanges(p2ptest.Exchange{
Label: "RetrieveRequestMsg",
Triggers: []p2ptest.Trigger{
{
Code: 5,
Msg: &RetrieveRequestMsg{
Addr: hash,
SkipCheck: true,
},
Peer: peerID,
},
},
Expects: []p2ptest.Expect{
{
Code: 6,
Msg: &ChunkDeliveryMsg{
Addr: hash,
SData: hash,
},
Peer: peerID,
},
},
})
if err != nil {
t.Fatal(err)
}
}
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
tester, streamer, localStore, teardown, err := newStreamerTester(t)
defer teardown()
if err != nil {
t.Fatal(err)
}
streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
return &testClient{
t: t,
}, nil
})
peerID := tester.IDs[0]
stream := NewStream("foo", "", true)
err = streamer.Subscribe(peerID, stream, NewRange(5, 8), Top)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
chunkKey := hash0[:]
chunkData := hash1[:]
chunk, created := localStore.GetOrCreateRequest(chunkKey)
if !created {
t.Fatal("chunk already exists")
}
select {
case <-chunk.ReqC:
t.Fatal("chunk is already received")
default:
}
err = tester.TestExchanges(p2ptest.Exchange{
Label: "Subscribe message",
Expects: []p2ptest.Expect{
{
Code: 4,
Msg: &SubscribeMsg{
Stream: stream,
History: NewRange(5, 8),
Priority: Top,
},
Peer: peerID,
},
},
},
p2ptest.Exchange{
Label: "ChunkDeliveryRequest message",
Triggers: []p2ptest.Trigger{
{
Code: 6,
Msg: &ChunkDeliveryMsg{
Addr: chunkKey,
SData: chunkData,
},
Peer: peerID,
},
},
})
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
timeout := time.NewTimer(1 * time.Second)
select {
case <-timeout.C:
t.Fatal("timeout receiving chunk")
case <-chunk.ReqC:
}
storedChunk, err := localStore.Get(chunkKey)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if !bytes.Equal(storedChunk.SData, chunkData) {
t.Fatal("Retrieved chunk has different data than original")
}
}
func TestDeliveryFromNodes(t *testing.T) {
testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
defaultSkipCheck = skipCheck
toAddr = network.NewAddrFromNodeID
createStoreFunc = createTestLocalStorageFromSim
conf := &streamTesting.RunConfig{
Adapter: *adapter,
NodeCount: nodes,
ConnLevel: conns,
ToAddr: toAddr,
Services: services,
EnableMsgEvents: false,
}
sim, teardown, err := streamTesting.NewSimulation(conf)
var rpcSubscriptionsWg sync.WaitGroup
defer func() {
rpcSubscriptionsWg.Wait()
teardown()
}()
if err != nil {
t.Fatal(err.Error())
}
stores = make(map[discover.NodeID]storage.ChunkStore)
for i, id := range sim.IDs {
stores[id] = sim.Stores[i]
}
registries = make(map[discover.NodeID]*TestRegistry)
deliveries = make(map[discover.NodeID]*Delivery)
peerCount = func(id discover.NodeID) int {
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
return 1
}
return 2
}
// here we distribute chunks of a random file into the Stores of all nodes except the first
rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
size := chunkCount * chunkSize
fileHash, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
// wait until all chunks stored
wait()
if err != nil {
t.Fatal(err.Error())
}
errc := make(chan error, 1)
waitPeerErrC = make(chan error)
quitC := make(chan struct{})
defer close(quitC)
action := func(ctx context.Context) error {
// each node Subscribes to each other's swarmChunkServerStreamName
// need to wait until an asynchronous process registers the peers in streamer.peers,
// which is used by Subscribe
// using a global err channel shared between the action and the node service
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
// every node except the last one subscribes to the upstream swarm chunk server stream,
// which responds to chunk retrieve requests; the last node in the chain has no upstream peer
for j := 0; j < nodes-1; j++ {
id := sim.IDs[j]
err := sim.CallClient(id, func(client *rpc.Client) error {
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
sid := sim.IDs[j+1]
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
})
if err != nil {
return err
}
}
// create a retriever FileStore for the pivot node
delivery := deliveries[sim.IDs[0]]
retrieveFunc := func(chunk *storage.Chunk) error {
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
}
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
go func() {
// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
// we must wait for the peer connections to have started before requesting
n, err := readAll(fileStore, fileHash)
log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
if err != nil {
errc <- fmt.Errorf("requesting chunks action error: %v", err)
}
}()
return nil
}
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case err := <-errc:
return false, err
case <-ctx.Done():
return false, ctx.Err()
default:
}
var total int64
err := sim.CallClient(id, func(client *rpc.Client) error {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
return client.CallContext(ctx, &total, "stream_readAll", common.BytesToHash(fileHash))
})
log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
if err != nil || total != int64(size) {
return false, nil
}
return true, nil
}
conf.Step = &simulations.Step{
Action: action,
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
// we are only testing the pivot node (net.Nodes[0])
Expect: &simulations.Expectation{
Nodes: sim.IDs[0:1],
Check: check,
},
}
startedAt := time.Now()
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result, err := sim.Run(ctx, conf)
finishedAt := time.Now()
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
streamTesting.CheckResult(t, result, startedAt, finishedAt)
}
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
for chunks := 32; chunks <= 128; chunks *= 2 {
for i := 2; i < 32; i *= 2 {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
},
)
}
}
}
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
for chunks := 32; chunks <= 128; chunks *= 2 {
for i := 2; i < 32; i *= 2 {
b.Run(
fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
func(b *testing.B) {
benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
},
)
}
}
}
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
defaultSkipCheck = skipCheck
toAddr = network.NewAddrFromNodeID
createStoreFunc = createTestLocalStorageFromSim
registries = make(map[discover.NodeID]*TestRegistry)
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
conf := &streamTesting.RunConfig{
Adapter: *adapter,
NodeCount: nodes,
ConnLevel: conns,
ToAddr: toAddr,
Services: services,
EnableMsgEvents: false,
}
sim, teardown, err := streamTesting.NewSimulation(conf)
var rpcSubscriptionsWg sync.WaitGroup
defer func() {
rpcSubscriptionsWg.Wait()
teardown()
}()
if err != nil {
b.Fatal(err.Error())
}
stores = make(map[discover.NodeID]storage.ChunkStore)
deliveries = make(map[discover.NodeID]*Delivery)
for i, id := range sim.IDs {
stores[id] = sim.Stores[i]
}
peerCount = func(id discover.NodeID) int {
if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
return 1
}
return 2
}
// wait channel for all peer connections of all nodes to be set up
waitPeerErrC = make(chan error)
// create a FileStore for the last node in the chain, which we are going to write to
remoteFileStore := storage.NewFileStore(sim.Stores[nodes-1], storage.NewFileStoreParams())
// channel to signal simulation initialisation with action call complete
// or node disconnections
disconnectC := make(chan error)
quitC := make(chan struct{})
initC := make(chan error)
action := func(ctx context.Context) error {
// each node Subscribes to each other's swarmChunkServerStreamName
// need to wait until an asynchronous process registers the peers in streamer.peers,
// which is used by Subscribe
// waitPeerErrC is a global err channel shared between the action and the node service
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
var err error
// each node except the last one subscribes to the upstream swarm chunk server stream
// which responds to chunk retrieve requests
for j := 0; j < nodes-1; j++ {
id := sim.IDs[j]
err = sim.CallClient(id, func(client *rpc.Client) error {
doneC, err := streamTesting.WatchDisconnections(id, client, disconnectC, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
sid := sim.IDs[j+1] // the upstream peer's id
return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(swarmChunkServerStreamName, "", false), NewRange(0, 0), Top)
})
if err != nil {
break
}
}
initC <- err
return nil
}
// the check function is only triggered when the benchmark finishes
trigger := make(chan discover.NodeID)
check := func(ctx context.Context, id discover.NodeID) (_ bool, err error) {
return true, nil
}
conf.Step = &simulations.Step{
Action: action,
Trigger: trigger,
// we are only testing the pivot node (net.Nodes[0])
Expect: &simulations.Expectation{
Nodes: sim.IDs[0:1],
Check: check,
},
}
// run the simulation in the background
errc := make(chan error)
go func() {
_, err := sim.Run(ctx, conf)
close(quitC)
errc <- err
}()
// wait for simulation action to complete stream subscriptions
err = <-initC
if err != nil {
b.Fatalf("simulation failed to initialise. expected no error. got %v", err)
}
// create a retriever FileStore for the pivot node
// by now deliveries are set for each node by the streamer service
delivery := deliveries[sim.IDs[0]]
retrieveFunc := func(chunk *storage.Chunk) error {
return delivery.RequestFromPeers(chunk.Addr[:], skipCheck)
}
netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc)
// benchmark loop
b.ResetTimer()
b.StopTimer()
Loop:
for i := 0; i < b.N; i++ {
// uploading chunkCount random chunks to the last node
hashes := make([]storage.Address, chunkCount)
for i := 0; i < chunkCount; i++ {
// create real chunks of the actual chunk size
hash, wait, err := remoteFileStore.Store(io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
// wait until all chunks stored
wait()
if err != nil {
b.Fatalf("expected no error. got %v", err)
}
// collect the hashes
hashes[i] = hash
}
// now benchmark the actual retrieval
// netstore.Get is called for each hash in a go routine and errors are collected
b.StartTimer()
errs := make(chan error)
for _, hash := range hashes {
go func(h storage.Address) {
_, err := netStore.Get(h)
log.Warn("test check netstore get", "hash", h, "err", err)
errs <- err
}(hash)
}
// count and report retrieval errors
// if there are misses then chunk timeout is too low for the distance and volume (?)
var total, misses int
for err := range errs {
if err != nil {
log.Warn(err.Error())
misses++
}
total++
if total == chunkCount {
break
}
}
b.StopTimer()
select {
case err = <-disconnectC:
if err != nil {
break Loop
}
default:
}
if misses > 0 {
err = fmt.Errorf("%v chunk not found out of %v", misses, total)
break Loop
}
}
select {
case <-quitC:
case trigger <- sim.IDs[0]:
}
if err == nil {
err = <-errc
} else {
if e := <-errc; e != nil {
b.Errorf("sim.Run function error: %v", e)
}
}
// benchmark over, trigger the check function to conclude the simulation
if err != nil {
b.Fatalf("expected no error. got %v", err)
}
}
func createTestLocalStorageFromSim(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
return stores[id], nil
}

@ -0,0 +1,42 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package intervals
import (
"io/ioutil"
"os"
"testing"
"github.com/ethereum/go-ethereum/swarm/state"
)
// TestDBStore tests basic functionality of DBStore.
func TestDBStore(t *testing.T) {
dir, err := ioutil.TempDir("", "intervals_test_db_store")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
store, err := state.NewDBStore(dir)
if err != nil {
t.Fatal(err)
}
defer store.Close()
testStore(t, store)
}

@ -0,0 +1,206 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package intervals
import (
"bytes"
"fmt"
"strconv"
"sync"
)
// Intervals store a list of intervals. Its purpose is to provide
// methods to add new intervals and retrieve missing intervals that
// need to be added.
// It may be used in synchronization of streaming data to persist
// retrieved data ranges between sessions.
type Intervals struct {
start uint64
ranges [][2]uint64
mu sync.RWMutex
}
// New creates a new instance of Intervals.
// Start argument limits the lower bound of intervals.
// No range below the start bound will be added by the Add method or
// returned by Next method. This limit may be used for
// tracking "live" synchronization, where the sync session
// starts from a specific value, and if "live" sync intervals
// need to be merged with historical ones, it can be safely done.
func NewIntervals(start uint64) *Intervals {
return &Intervals{
start: start,
}
}
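// A short, hypothetical usage sketch of this API (standalone; assumes the
// swarm/network/stream/intervals import path):
package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
)

func main() {
    i := intervals.NewIntervals(0)
    i.Add(0, 10)
    i.Add(20, 30)
    fmt.Println(i)        // [[0 10] [20 30]]
    fmt.Println(i.Next()) // 11 19: the first unfulfilled gap, bounds inclusive
    fmt.Println(i.Last()) // 30
    i.Add(11, 19)         // filling the gap coalesces the ranges
    fmt.Println(i)        // [[0 30]]
    fmt.Println(i.Next()) // 31 0: a zero end means no upper limit
}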
// Add adds a new range to intervals. Range start and end values
// are both inclusive.
func (i *Intervals) Add(start, end uint64) {
i.mu.Lock()
defer i.mu.Unlock()
i.add(start, end)
}
func (i *Intervals) add(start, end uint64) {
if start < i.start {
start = i.start
}
if end < i.start {
return
}
minStartJ := -1
maxEndJ := -1
j := 0
for ; j < len(i.ranges); j++ {
if minStartJ < 0 {
if (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) || (start <= i.ranges[j][1]+1 && end+1 >= i.ranges[j][1]) {
if i.ranges[j][0] < start {
start = i.ranges[j][0]
}
minStartJ = j
}
}
if (start <= i.ranges[j][1] && end+1 >= i.ranges[j][1]) || (start <= i.ranges[j][0] && end+1 >= i.ranges[j][0]) {
if i.ranges[j][1] > end {
end = i.ranges[j][1]
}
maxEndJ = j
}
if end+1 <= i.ranges[j][0] {
break
}
}
if minStartJ < 0 && maxEndJ < 0 {
i.ranges = append(i.ranges[:j], append([][2]uint64{{start, end}}, i.ranges[j:]...)...)
return
}
if minStartJ >= 0 {
i.ranges[minStartJ][0] = start
}
if maxEndJ >= 0 {
i.ranges[maxEndJ][1] = end
}
if minStartJ >= 0 && maxEndJ >= 0 && minStartJ != maxEndJ {
i.ranges[maxEndJ][0] = start
i.ranges = append(i.ranges[:minStartJ], i.ranges[maxEndJ:]...)
}
}
// Merge adds all the intervals from m to the current Intervals.
func (i *Intervals) Merge(m *Intervals) {
m.mu.RLock()
defer m.mu.RUnlock()
i.mu.Lock()
defer i.mu.Unlock()
for _, r := range m.ranges {
i.add(r[0], r[1])
}
}
// Next returns the first range interval that is not fulfilled. Returned
// start and end values are both inclusive, meaning that the whole range
// including start and end need to be added in order to fill the gap
// in intervals.
// Returned value for end is 0 if the next interval is after the whole
// range that is stored in Intervals. Zero end value represents no limit
// on the next interval length.
func (i *Intervals) Next() (start, end uint64) {
i.mu.RLock()
defer i.mu.RUnlock()
l := len(i.ranges)
if l == 0 {
return i.start, 0
}
if i.ranges[0][0] != i.start {
return i.start, i.ranges[0][0] - 1
}
if l == 1 {
return i.ranges[0][1] + 1, 0
}
return i.ranges[0][1] + 1, i.ranges[1][0] - 1
}
// Last returns the value that is at the end of the last interval.
func (i *Intervals) Last() (end uint64) {
i.mu.RLock()
defer i.mu.RUnlock()
l := len(i.ranges)
if l == 0 {
return 0
}
return i.ranges[l-1][1]
}
// String returns a descriptive representation of range intervals
// in [] notation, as a list of two element vectors.
func (i *Intervals) String() string {
return fmt.Sprint(i.ranges)
}
// MarshalBinary encodes Intervals parameters into a semicolon separated list.
// The first element in the list is base36-encoded start value. The following
// elements are two base36-encoded value ranges separated by comma.
func (i *Intervals) MarshalBinary() (data []byte, err error) {
d := make([][]byte, len(i.ranges)+1)
d[0] = []byte(strconv.FormatUint(i.start, 36))
for j := range i.ranges {
r := i.ranges[j]
d[j+1] = []byte(strconv.FormatUint(r[0], 36) + "," + strconv.FormatUint(r[1], 36))
}
return bytes.Join(d, []byte(";")), nil
}
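// A hypothetical round-trip sketch of the format described above: with
// start 0 and ranges [1,5] and [40,50], the encoding is "0;1,5;14,1e"
// (40 and 50 base36-encoded). Standalone; assumes the
// swarm/network/stream/intervals import path:
package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/swarm/network/stream/intervals"
)

func main() {
    i := intervals.NewIntervals(0)
    i.Add(1, 5)
    i.Add(40, 50)
    data, _ := i.MarshalBinary()
    fmt.Println(string(data)) // 0;1,5;14,1e
    j := intervals.NewIntervals(0)
    if err := j.UnmarshalBinary(data); err != nil {
        panic(err)
    }
    fmt.Println(j) // [[1 5] [40 50]]
}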
// UnmarshalBinary decodes data according to the Intervals.MarshalBinary format.
func (i *Intervals) UnmarshalBinary(data []byte) (err error) {
d := bytes.Split(data, []byte(";"))
l := len(d)
if l == 0 {
return nil
}
if l >= 1 {
i.start, err = strconv.ParseUint(string(d[0]), 36, 64)
if err != nil {
return err
}
}
if l == 1 {
return nil
}
i.ranges = make([][2]uint64, 0, l-1)
for j := 1; j < l; j++ {
r := bytes.SplitN(d[j], []byte(","), 2)
if len(r) < 2 {
return fmt.Errorf("range %d has less then 2 elements", j)
}
start, err := strconv.ParseUint(string(r[0]), 36, 64)
if err != nil {
return fmt.Errorf("parsing the first element in range %d: %v", j, err)
}
end, err := strconv.ParseUint(string(r[1]), 36, 64)
if err != nil {
return fmt.Errorf("parsing the second element in range %d: %v", j, err)
}
i.ranges = append(i.ranges, [2]uint64{start, end})
}
return nil
}

@ -0,0 +1,395 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package intervals
import "testing"
// Test tests Interval methods Add, Next and Last for various
// initial state.
func Test(t *testing.T) {
for i, tc := range []struct {
startLimit uint64
initial [][2]uint64
start uint64
end uint64
expected string
nextStart uint64
nextEnd uint64
last uint64
}{
{
initial: nil,
start: 0,
end: 0,
expected: "[[0 0]]",
nextStart: 1,
nextEnd: 0,
last: 0,
},
{
initial: nil,
start: 0,
end: 10,
expected: "[[0 10]]",
nextStart: 11,
nextEnd: 0,
last: 10,
},
{
initial: nil,
start: 5,
end: 15,
expected: "[[5 15]]",
nextStart: 0,
nextEnd: 4,
last: 15,
},
{
initial: [][2]uint64{{0, 0}},
start: 0,
end: 0,
expected: "[[0 0]]",
nextStart: 1,
nextEnd: 0,
last: 0,
},
{
initial: [][2]uint64{{0, 0}},
start: 5,
end: 15,
expected: "[[0 0] [5 15]]",
nextStart: 1,
nextEnd: 4,
last: 15,
},
{
initial: [][2]uint64{{5, 15}},
start: 5,
end: 15,
expected: "[[5 15]]",
nextStart: 0,
nextEnd: 4,
last: 15,
},
{
initial: [][2]uint64{{5, 15}},
start: 5,
end: 20,
expected: "[[5 20]]",
nextStart: 0,
nextEnd: 4,
last: 20,
},
{
initial: [][2]uint64{{5, 15}},
start: 10,
end: 20,
expected: "[[5 20]]",
nextStart: 0,
nextEnd: 4,
last: 20,
},
{
initial: [][2]uint64{{5, 15}},
start: 0,
end: 20,
expected: "[[0 20]]",
nextStart: 21,
nextEnd: 0,
last: 20,
},
{
initial: [][2]uint64{{5, 15}},
start: 2,
end: 10,
expected: "[[2 15]]",
nextStart: 0,
nextEnd: 1,
last: 15,
},
{
initial: [][2]uint64{{5, 15}},
start: 2,
end: 4,
expected: "[[2 15]]",
nextStart: 0,
nextEnd: 1,
last: 15,
},
{
initial: [][2]uint64{{5, 15}},
start: 2,
end: 5,
expected: "[[2 15]]",
nextStart: 0,
nextEnd: 1,
last: 15,
},
{
initial: [][2]uint64{{5, 15}},
start: 2,
end: 3,
expected: "[[2 3] [5 15]]",
nextStart: 0,
nextEnd: 1,
last: 15,
},
{
initial: [][2]uint64{{0, 1}, {5, 15}},
start: 2,
end: 4,
expected: "[[0 15]]",
nextStart: 16,
nextEnd: 0,
last: 15,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 2,
end: 10,
expected: "[[0 10] [15 20]]",
nextStart: 11,
nextEnd: 14,
last: 20,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 8,
end: 18,
expected: "[[0 5] [8 20]]",
nextStart: 6,
nextEnd: 7,
last: 20,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 2,
end: 17,
expected: "[[0 20]]",
nextStart: 21,
nextEnd: 0,
last: 20,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 2,
end: 25,
expected: "[[0 25]]",
nextStart: 26,
nextEnd: 0,
last: 25,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 5,
end: 14,
expected: "[[0 20]]",
nextStart: 21,
nextEnd: 0,
last: 20,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}},
start: 6,
end: 14,
expected: "[[0 20]]",
nextStart: 21,
nextEnd: 0,
last: 20,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}},
start: 6,
end: 29,
expected: "[[0 40]]",
nextStart: 41,
nextEnd: 0,
last: 40,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
start: 3,
end: 55,
expected: "[[0 60]]",
nextStart: 61,
nextEnd: 0,
last: 60,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
start: 21,
end: 49,
expected: "[[0 5] [15 60]]",
nextStart: 6,
nextEnd: 14,
last: 60,
},
{
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
start: 0,
end: 100,
expected: "[[0 100]]",
nextStart: 101,
nextEnd: 0,
last: 100,
},
{
startLimit: 100,
initial: nil,
start: 0,
end: 0,
expected: "[]",
nextStart: 100,
nextEnd: 0,
last: 0,
},
{
startLimit: 100,
initial: nil,
start: 20,
end: 30,
expected: "[]",
nextStart: 100,
nextEnd: 0,
last: 0,
},
{
startLimit: 100,
initial: nil,
start: 50,
end: 100,
expected: "[[100 100]]",
nextStart: 101,
nextEnd: 0,
last: 100,
},
{
startLimit: 100,
initial: nil,
start: 50,
end: 110,
expected: "[[100 110]]",
nextStart: 111,
nextEnd: 0,
last: 110,
},
{
startLimit: 100,
initial: nil,
start: 120,
end: 130,
expected: "[[120 130]]",
nextStart: 100,
nextEnd: 119,
last: 130,
},
} {
intervals := NewIntervals(tc.startLimit)
intervals.ranges = tc.initial
intervals.Add(tc.start, tc.end)
got := intervals.String()
if got != tc.expected {
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got)
}
nextStart, nextEnd := intervals.Next()
if nextStart != tc.nextStart {
t.Errorf("interval #%d, expected next start %d, got %d", i, tc.nextStart, nextStart)
}
if nextEnd != tc.nextEnd {
t.Errorf("interval #%d, expected next end %d, got %d", i, tc.nextEnd, nextEnd)
}
last := intervals.Last()
if last != tc.last {
t.Errorf("interval #%d, expected last %d, got %d", i, tc.last, last)
}
}
}
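The nextStart/nextEnd expectations above encode a convention worth spelling out: Next returns the earliest unsynced range, and a zero end means the range has no upper bound. A small in-package sketch (values arbitrary):

// sketchNext illustrates the Next convention: end == 0 means "unbounded".
func sketchNext() (start, end uint64) {
	i := NewIntervals(0)
	i.Add(0, 10)
	return i.Next() // 11, 0: everything from 11 on is still unsynced
}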
func TestMerge(t *testing.T) {
for i, tc := range []struct {
initial [][2]uint64
merge [][2]uint64
expected string
}{
{
initial: nil,
merge: nil,
expected: "[]",
},
{
initial: [][2]uint64{{10, 20}},
merge: nil,
expected: "[[10 20]]",
},
{
initial: nil,
merge: [][2]uint64{{15, 25}},
expected: "[[15 25]]",
},
{
initial: [][2]uint64{{0, 100}},
merge: [][2]uint64{{150, 250}},
expected: "[[0 100] [150 250]]",
},
{
initial: [][2]uint64{{0, 100}},
merge: [][2]uint64{{101, 250}},
expected: "[[0 250]]",
},
{
initial: [][2]uint64{{0, 10}, {30, 40}},
merge: [][2]uint64{{20, 25}, {41, 50}},
expected: "[[0 10] [20 25] [30 50]]",
},
{
initial: [][2]uint64{{0, 5}, {15, 20}, {30, 40}, {50, 60}},
merge: [][2]uint64{{6, 25}},
expected: "[[0 25] [30 40] [50 60]]",
},
} {
intervals := NewIntervals(0)
intervals.ranges = tc.initial
m := NewIntervals(0)
m.ranges = tc.merge
intervals.Merge(m)
got := intervals.String()
if got != tc.expected {
t.Errorf("interval #%d: expected %s, got %s", i, tc.expected, got)
}
}
}
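A compact illustration of the Merge semantics exercised above: ranges of the argument are added to the receiver without shrinking anything it already covers (a sketch; values arbitrary):

// sketchMerge merges two non-adjacent interval sets.
func sketchMerge() string {
	a := NewIntervals(0)
	a.Add(0, 10)
	b := NewIntervals(0)
	b.Add(20, 25)
	a.Merge(b)
	return a.String() // [[0 10] [20 25]]
}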

@@ -0,0 +1,80 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package intervals
import (
"testing"
"github.com/ethereum/go-ethereum/swarm/state"
)
// TestInmemoryStore tests basic functionality of InmemoryStore.
func TestInmemoryStore(t *testing.T) {
testStore(t, state.NewInmemoryStore())
}
// testStore is a helper function to test various Store implementations.
func testStore(t *testing.T, s state.Store) {
key1 := "key1"
i1 := NewIntervals(0)
i1.Add(10, 20)
if err := s.Put(key1, i1); err != nil {
t.Fatal(err)
}
i := &Intervals{}
err := s.Get(key1, i)
if err != nil {
t.Fatal(err)
}
if i.String() != i1.String() {
t.Errorf("expected interval %s, got %s", i1, i)
}
key2 := "key2"
i2 := NewIntervals(0)
i2.Add(10, 20)
if err := s.Put(key2, i2); err != nil {
t.Fatal(err)
}
err = s.Get(key2, i)
if err != nil {
t.Fatal(err)
}
if i.String() != i2.String() {
t.Errorf("expected interval %s, got %s", i2, i)
}
if err := s.Delete(key1); err != nil {
t.Fatal(err)
}
if err := s.Get(key1, i); err != state.ErrNotFound {
t.Errorf("expected error %v, got %v", state.ErrNotFound, err)
}
if err := s.Get(key2, i); err != nil {
t.Errorf("expected no error, got %v", err)
}
if err := s.Delete(key2); err != nil {
t.Fatal(err)
}
if err := s.Get(key2, i); err != state.ErrNotFound {
t.Errorf("expected error %v, got %v", state.ErrNotFound, err)
}
}
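This works because Intervals implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler (see the first file in this diff), so any state.Store can persist it directly. A minimal sketch; the key format is made up:

// sketchPersist stores and reloads sync progress for a peer/stream pair.
func sketchPersist() error {
	s := state.NewInmemoryStore()
	i := NewIntervals(0)
	i.Add(10, 20)
	if err := s.Put("peer1|SYNC|6", i); err != nil { // hypothetical key
		return err
	}
	recovered := &Intervals{}
	return s.Get("peer1|SYNC|6", recovered) // state.ErrNotFound if absent
}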

@@ -0,0 +1,313 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream
import (
"context"
crand "crypto/rand"
"encoding/binary"
"fmt"
"io"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/simulations"
"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/network"
streamTesting "github.com/ethereum/go-ethereum/swarm/network/stream/testing"
"github.com/ethereum/go-ethereum/swarm/state"
"github.com/ethereum/go-ethereum/swarm/storage"
)
var (
externalStreamName = "externalStream"
externalStreamSessionAt uint64 = 50
externalStreamMaxKeys uint64 = 100
)
func newIntervalsStreamerService(ctx *adapters.ServiceContext) (node.Service, error) {
id := ctx.Config.ID
addr := toAddr(id)
kad := network.NewKademlia(addr.Over(), network.NewKadParams())
store := stores[id].(*storage.LocalStore)
db := storage.NewDBAPI(store)
delivery := NewDelivery(kad, db)
deliveries[id] = delivery
r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
SkipCheck: defaultSkipCheck,
})
r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
return newTestExternalClient(db), nil
})
r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
})
go func() {
waitPeerErrC <- waitForPeers(r, 1*time.Second, peerCount(id))
}()
return &TestExternalRegistry{r}, nil
}
func TestIntervals(t *testing.T) {
testIntervals(t, true, nil, false)
testIntervals(t, false, NewRange(9, 26), false)
testIntervals(t, true, NewRange(9, 26), false)
testIntervals(t, true, nil, true)
testIntervals(t, false, NewRange(9, 26), true)
testIntervals(t, true, NewRange(9, 26), true)
}
func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
nodes := 2
chunkCount := dataChunkCount
defer setDefaultSkipCheck(defaultSkipCheck)
defaultSkipCheck = skipCheck
toAddr = network.NewAddrFromNodeID
conf := &streamTesting.RunConfig{
Adapter: *adapter,
NodeCount: nodes,
ConnLevel: 1,
ToAddr: toAddr,
Services: services,
DefaultService: "intervalsStreamer",
}
sim, teardown, err := streamTesting.NewSimulation(conf)
if err != nil {
t.Fatal(err)
}
var rpcSubscriptionsWg sync.WaitGroup
defer func() {
// wait for all RPC subscription goroutines to finish before teardown
rpcSubscriptionsWg.Wait()
teardown()
}()
stores = make(map[discover.NodeID]storage.ChunkStore)
deliveries = make(map[discover.NodeID]*Delivery)
for i, id := range sim.IDs {
stores[id] = sim.Stores[i]
}
peerCount = func(id discover.NodeID) int {
return 1
}
fileStore := storage.NewFileStore(sim.Stores[0], storage.NewFileStoreParams())
size := chunkCount * chunkSize
_, wait, err := fileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
if err != nil {
t.Fatal(err)
}
// wait only after the error check: a failed Store may return a nil wait func
wait()
errc := make(chan error, 1)
waitPeerErrC = make(chan error)
quitC := make(chan struct{})
defer close(quitC)
action := func(ctx context.Context) error {
i := 0
for err := range waitPeerErrC {
if err != nil {
return fmt.Errorf("error waiting for peers: %s", err)
}
i++
if i == nodes {
break
}
}
id := sim.IDs[1]
err := sim.CallClient(id, func(client *rpc.Client) error {
sid := sim.IDs[0]
doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
if err != nil {
return err
}
rpcSubscriptionsWg.Add(1)
go func() {
<-doneC
rpcSubscriptionsWg.Done()
}()
ctx, cancel := context.WithTimeout(ctx, 100*time.Second)
defer cancel()
err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream(externalStreamName, "", live), history, Top)
if err != nil {
return err
}
liveErrC := make(chan error)
historyErrC := make(chan error)
go func() {
if !live {
close(liveErrC)
return
}
var err error
defer func() {
liveErrC <- err
}()
// live stream
liveHashesChan := make(chan []byte)
liveSubscription, err := client.Subscribe(ctx, "stream", liveHashesChan, "getHashes", sid, NewStream(externalStreamName, "", true))
if err != nil {
return
}
defer liveSubscription.Unsubscribe()
i := externalStreamSessionAt
// we have subscribed, enable notifications
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", true))
if err != nil {
return
}
for {
select {
case hash := <-liveHashesChan:
h := binary.BigEndian.Uint64(hash)
if h != i {
err = fmt.Errorf("expected live hash %d, got %d", i, h)
return
}
i++
if i > externalStreamMaxKeys {
return
}
case err = <-liveSubscription.Err():
return
case <-ctx.Done():
return
}
}
}()
go func() {
if live && history == nil {
close(historyErrC)
return
}
var err error
defer func() {
historyErrC <- err
}()
// history stream
historyHashesChan := make(chan []byte)
historySubscription, err := client.Subscribe(ctx, "stream", historyHashesChan, "getHashes", sid, NewStream(externalStreamName, "", false))
if err != nil {
return
}
defer historySubscription.Unsubscribe()
var i uint64
historyTo := externalStreamMaxKeys
if history != nil {
i = history.From
if history.To != 0 {
historyTo = history.To
}
}
// we have subscribed, enable notifications
err = client.CallContext(ctx, nil, "stream_enableNotifications", sid, NewStream(externalStreamName, "", false))
if err != nil {
return
}
for {
select {
case hash := <-historyHashesChan:
h := binary.BigEndian.Uint64(hash)
if h != i {
err = fmt.Errorf("expected history hash %d, got %d", i, h)
return
}
i++
if i > historyTo {
return
}
case err = <-historySubscription.Err():
return
case <-ctx.Done():
return
}
}
}()
if err := <-liveErrC; err != nil {
return err
}
if err := <-historyErrC; err != nil {
return err
}
return nil
})
return err
}
check := func(ctx context.Context, id discover.NodeID) (bool, error) {
select {
case err := <-errc:
return false, err
case <-ctx.Done():
return false, ctx.Err()
default:
}
return true, nil
}
conf.Step = &simulations.Step{
Action: action,
Trigger: streamTesting.Trigger(10*time.Millisecond, quitC, sim.IDs[0]),
Expect: &simulations.Expectation{
Nodes: sim.IDs[1:2], // run the check on the subscribing node sim.IDs[1]
Check: check,
},
}
startedAt := time.Now()
timeout := 300 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
result, err := sim.Run(ctx, conf)
finishedAt := time.Now()
if err != nil {
t.Fatalf("Setting up simulation failed: %v", err)
}
if result.Error != nil {
t.Fatalf("Simulation failed: %s", result.Error)
}
streamTesting.CheckResult(t, result, startedAt, finishedAt)
}

@@ -0,0 +1,370 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream
import (
"errors"
"fmt"
"sync"
"time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/log"
bv "github.com/ethereum/go-ethereum/swarm/network/bitvector"
"github.com/ethereum/go-ethereum/swarm/storage"
)
// Stream defines a unique stream identifier.
type Stream struct {
// Name is used for Client and Server functions identification.
Name string
// Key is the name of specific stream data.
Key string
// Live defines whether the stream delivers only new data
// for the specific stream.
Live bool
}
func NewStream(name string, key string, live bool) Stream {
return Stream{
Name: name,
Key: key,
Live: live,
}
}
// String returns a stream id based on all Stream fields.
func (s Stream) String() string {
t := "h"
if s.Live {
t = "l"
}
return fmt.Sprintf("%s|%s|%s", s.Name, s.Key, t)
}
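To make the id format concrete, a sketch using made-up stream values; "l" marks a live stream, "h" its history counterpart:

func sketchStreamIDs() {
	fmt.Println(NewStream("SYNC", "6", true))  // SYNC|6|l
	fmt.Println(NewStream("SYNC", "6", false)) // SYNC|6|h
}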
// SubscribeMsg is the protocol msg for requesting a stream (section)
type SubscribeMsg struct {
Stream Stream
History *Range `rlp:"nil"`
Priority uint8 // delivered on priority channel
}
// RequestSubscriptionMsg is the protocol msg for a node to request subscription to a
// specific stream
type RequestSubscriptionMsg struct {
Stream Stream
History *Range `rlp:"nil"`
Priority uint8 // delivered on priority channel
}
func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) {
log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream))
return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority)
}
func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) {
metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1)
defer func() {
if err != nil {
if e := p.Send(SubscribeErrorMsg{
Error: err.Error(),
}); e != nil {
log.Error("send stream subscribe error message", "err", err)
}
}
}()
log.Debug("received subscription", "from", p.streamer.addr.ID(), "peer", p.ID(), "stream", req.Stream, "history", req.History)
f, err := p.streamer.GetServerFunc(req.Stream.Name)
if err != nil {
return err
}
s, err := f(p, req.Stream.Key, req.Stream.Live)
if err != nil {
return err
}
os, err := p.setServer(req.Stream, s, req.Priority)
if err != nil {
return err
}
var from uint64
var to uint64
if !req.Stream.Live && req.History != nil {
from = req.History.From
to = req.History.To
}
go func() {
if err := p.SendOfferedHashes(os, from, to); err != nil {
log.Warn("SendOfferedHashes dropping peer", "err", err)
p.Drop(err)
}
}()
if req.Stream.Live && req.History != nil {
// subscribe to the history stream
s, err := f(p, req.Stream.Key, false)
if err != nil {
return err
}
os, err := p.setServer(getHistoryStream(req.Stream), s, getHistoryPriority(req.Priority))
if err != nil {
return err
}
go func() {
if err := p.SendOfferedHashes(os, req.History.From, req.History.To); err != nil {
log.Warn("SendOfferedHashes dropping peer", "err", err)
p.Drop(err)
}
}()
}
return nil
}
type SubscribeErrorMsg struct {
Error string
}
func (p *Peer) handleSubscribeErrorMsg(req *SubscribeErrorMsg) (err error) {
return fmt.Errorf("subscribe to peer %s: %v", p.ID(), req.Error)
}
type UnsubscribeMsg struct {
Stream Stream
}
func (p *Peer) handleUnsubscribeMsg(req *UnsubscribeMsg) error {
return p.removeServer(req.Stream)
}
type QuitMsg struct {
Stream Stream
}
func (p *Peer) handleQuitMsg(req *QuitMsg) error {
return p.removeClient(req.Stream)
}
// OfferedHashesMsg is the protocol msg for offering to hand over a
// stream section
type OfferedHashesMsg struct {
Stream Stream // name of Stream
From, To uint64 // peer and db-specific entry count
Hashes []byte // stream of hashes (128)
*HandoverProof // HandoverProof
}
// String pretty prints OfferedHashesMsg
func (m OfferedHashesMsg) String() string {
return fmt.Sprintf("Stream '%v' [%v-%v] (%v)", m.Stream, m.From, m.To, len(m.Hashes)/HashSize)
}
// handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface's
// NeedData method to determine which of the offered hashes to request
func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error {
metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1)
c, _, err := p.getOrSetClient(req.Stream, req.From, req.To)
if err != nil {
return err
}
hashes := req.Hashes
want, err := bv.New(len(hashes) / HashSize)
if err != nil {
return fmt.Errorf("error initiaising bitvector of length %v: %v", len(hashes)/HashSize, err)
}
wg := sync.WaitGroup{}
for i := 0; i < len(hashes); i += HashSize {
hash := hashes[i : i+HashSize]
if wait := c.NeedData(hash); wait != nil {
want.Set(i/HashSize, true)
wg.Add(1)
// create request and wait until the chunk data arrives and is stored
go func(w func()) {
w()
wg.Done()
}(wait)
}
}
go func() {
wg.Wait()
select {
case c.next <- c.batchDone(p, req, hashes):
case <-c.quit:
}
}()
// only send WantedHashesMsg once all missing chunks of the previous batch have arrived
if c.stream.Live {
c.sessionAt = req.From
}
from, to := c.nextBatch(req.To + 1)
log.Trace("received offered batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
if from == to {
return nil
}
msg := &WantedHashesMsg{
Stream: req.Stream,
Want: want.Bytes(),
From: from,
To: to,
}
go func() {
select {
case <-time.After(120 * time.Second):
log.Warn("handleOfferedHashesMsg timeout, so dropping peer")
p.Drop(errors.New("handle offered hashes timeout"))
return
case err := <-c.next:
if err != nil {
log.Warn("c.next dropping peer", "err", err)
p.Drop(err)
return
}
case <-c.quit:
return
}
log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To)
err := p.SendPriority(msg, c.priority)
if err != nil {
log.Warn("SendPriority err, so dropping peer", "err", err)
p.Drop(err)
}
}()
return nil
}
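For clarity, bit i of the want bitvector built in the loop above corresponds to the i-th offered hash. A standalone sketch of that mapping, using the bitvector API imported as bv; the needed predicate is hypothetical:

// sketchWant builds the Want field of a WantedHashesMsg from an offered batch.
func sketchWant(hashes []byte, needed func([]byte) bool) ([]byte, error) {
	want, err := bv.New(len(hashes) / HashSize)
	if err != nil {
		return nil, err
	}
	for i := 0; i < len(hashes); i += HashSize {
		if needed(hashes[i : i+HashSize]) {
			want.Set(i/HashSize, true) // bit i/HashSize <-> i-th offered hash
		}
	}
	return want.Bytes(), nil
}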
// WantedHashesMsg is the protocol msg data for signaling which hashes
// offered in OfferedHashesMsg the downstream peer actually wants sent over
type WantedHashesMsg struct {
Stream Stream
Want []byte // bitvector indicating which keys of the batch needed
From, To uint64 // next interval offset - empty if not to be continued
}
// String pretty prints WantedHashesMsg
func (m WantedHashesMsg) String() string {
return fmt.Sprintf("Stream '%v', Want: %x, Next: [%v-%v]", m.Stream, m.Want, m.From, m.To)
}
// handleWantedHashesMsg protocol msg handler
// * sends the next batch of unsynced keys
// * sends the actual data chunks as per WantedHashesMsg
func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error {
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1)
log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To)
s, err := p.getServer(req.Stream)
if err != nil {
return err
}
hashes := s.currentBatch
// launch in goroutine since GetBatch blocks until new hashes arrive
go func() {
if err := p.SendOfferedHashes(s, req.From, req.To); err != nil {
log.Warn("SendOfferedHashes dropping peer", "err", err)
p.Drop(err)
}
}()
l := len(hashes) / HashSize
log.Trace("wanted batch length", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To, "lenhashes", len(hashes), "l", l)
want, err := bv.NewFromBytes(req.Want, l)
if err != nil {
return fmt.Errorf("error initiaising bitvector of length %v: %v", l, err)
}
for i := 0; i < l; i++ {
if want.Get(i) {
metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1)
hash := hashes[i*HashSize : (i+1)*HashSize]
data, err := s.GetData(hash)
if err != nil {
return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err)
}
chunk := storage.NewChunk(hash, nil)
chunk.SData = data
if length := len(chunk.SData); length < 9 {
log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr)
}
if err := p.Deliver(chunk, s.priority); err != nil {
return err
}
}
}
return nil
}
// Handover represents a statement that the upstream peer hands over the stream section
type Handover struct {
Stream Stream // name of stream
Start, End uint64 // index of hashes
Root []byte // Root hash for indexed segment inclusion proofs
}
// HandoverProof represents a signed statement that the upstream peer handed over the stream section
type HandoverProof struct {
Sig []byte // Sign(Hash(Serialisation(Handover)))
*Handover
}
// Takeover represents a statement that the downstream peer took over
// (stored all data in) the handed-over stream section
type Takeover Handover
// TakeoverProof represents a signed statement that the downstream peer took over
// the stream section
type TakeoverProof struct {
Sig []byte // Sign(Hash(Serialisation(Takeover)))
*Takeover
}
// TakeoverProofMsg is the protocol msg sent by downstream peer
type TakeoverProofMsg TakeoverProof
// String pretty prints TakeoverProofMsg
func (m TakeoverProofMsg) String() string {
return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig)
}
func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error {
_, err := p.getServer(req.Stream)
// store the strongest takeoverproof for the stream in streamer
return err
}
