mirror of https://github.com/ethereum/go-ethereum
swarm: Chunk refactor (#17659)
Co-authored-by: Janos Guljas <janos@resenje.org> Co-authored-by: Balint Gabor <balint.g@gmail.com> Co-authored-by: Anton Evangelatov <anton.evangelatov@gmail.com> Co-authored-by: Viktor Trón <viktor.tron@gmail.com>pull/17669/merge
parent
ff3a5d24d2
commit
3ff2f75636
@ -0,0 +1,305 @@ |
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package network |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"sync" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/log" |
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover" |
||||||
|
"github.com/ethereum/go-ethereum/swarm/storage" |
||||||
|
) |
||||||
|
|
||||||
|
// searchTimeout is the time waited after a request before the search is
// extended by re-requesting from another peer (see Fetcher.run).
var searchTimeout = 1 * time.Second

// Time to consider peer to be skipped.
// Also used in stream delivery.
var RequestTimeout = 10 * time.Second

// RequestFunc issues a retrieve request for a chunk. It returns the node ID
// of the peer the request was sent to, a quit channel that signals the peer
// going away, and an error if no request could be issued.
type RequestFunc func(context.Context, *Request) (*discover.NodeID, chan struct{}, error)
||||||
|
|
||||||
|
// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
// keeps it alive until all active requests are completed. This can happen:
// 1. either because the chunk is delivered
// 2. or because the requestor cancelled/timed out
// Fetcher self destroys itself after it is completed.
// TODO: cancel all forward requests after termination
type Fetcher struct {
	protoRequestFunc RequestFunc           // request function fetcher calls to issue retrieve request for a chunk
	addr             storage.Address       // the address of the chunk to be fetched
	offerC           chan *discover.NodeID // channel of sources (peer node id strings)
	requestC         chan struct{}         // channel signaling explicit requests for the chunk
	skipCheck        bool                  // propagated to every Request this fetcher creates
}
||||||
|
|
||||||
|
// Request holds the parameters of a single chunk retrieve request
// passed to a RequestFunc.
type Request struct {
	Addr        storage.Address  // chunk address
	Source      *discover.NodeID // nodeID of peer to request from (can be nil)
	SkipCheck   bool             // whether to offer the chunk first or deliver directly
	peersToSkip *sync.Map        // peers not to request chunk from (only makes sense if source is nil)
}
||||||
|
|
||||||
|
// NewRequest returns a new instance of Request based on chunk address skip check and
|
||||||
|
// a map of peers to skip.
|
||||||
|
func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request { |
||||||
|
return &Request{ |
||||||
|
Addr: addr, |
||||||
|
SkipCheck: skipCheck, |
||||||
|
peersToSkip: peersToSkip, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// SkipPeer returns if the peer with nodeID should not be requested to deliver a chunk.
|
||||||
|
// Peers to skip are kept per Request and for a time period of RequestTimeout.
|
||||||
|
// This function is used in stream package in Delivery.RequestFromPeers to optimize
|
||||||
|
// requests for chunks.
|
||||||
|
func (r *Request) SkipPeer(nodeID string) bool { |
||||||
|
val, ok := r.peersToSkip.Load(nodeID) |
||||||
|
if !ok { |
||||||
|
return false |
||||||
|
} |
||||||
|
t, ok := val.(time.Time) |
||||||
|
if ok && time.Now().After(t.Add(RequestTimeout)) { |
||||||
|
// deadine expired
|
||||||
|
r.peersToSkip.Delete(nodeID) |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// FetcherFactory is initialised with a request function and can create fetchers
type FetcherFactory struct {
	request   RequestFunc // request function passed to every created Fetcher
	skipCheck bool        // skip-check setting passed to every created Fetcher
}
||||||
|
|
||||||
|
// NewFetcherFactory takes a request function and skip check parameter and creates a FetcherFactory
|
||||||
|
func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory { |
||||||
|
return &FetcherFactory{ |
||||||
|
request: request, |
||||||
|
skipCheck: skipCheck, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// New constructs a new Fetcher, for the given chunk. All peers in peersToSkip are not requested to
// deliver the given chunk. peersToSkip should always contain the peers which are actively requesting
// this chunk, to make sure we don't request back the chunks from them.
// The created Fetcher is started and returned.
func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peersToSkip *sync.Map) storage.NetFetcher {
	fetcher := NewFetcher(source, f.request, f.skipCheck)
	// the fetcher's lifetime is bound to ctx; run returns when ctx is done
	go fetcher.run(ctx, peersToSkip)
	return fetcher
}
||||||
|
|
||||||
|
// NewFetcher creates a new Fetcher for the given chunk address using the given request function.
|
||||||
|
func NewFetcher(addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher { |
||||||
|
return &Fetcher{ |
||||||
|
addr: addr, |
||||||
|
protoRequestFunc: rf, |
||||||
|
offerC: make(chan *discover.NodeID), |
||||||
|
requestC: make(chan struct{}), |
||||||
|
skipCheck: skipCheck, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg` and the node does not have the chunk locally.
|
||||||
|
func (f *Fetcher) Offer(ctx context.Context, source *discover.NodeID) { |
||||||
|
// First we need to have this select to make sure that we return if context is done
|
||||||
|
select { |
||||||
|
case <-ctx.Done(): |
||||||
|
return |
||||||
|
default: |
||||||
|
} |
||||||
|
|
||||||
|
// This select alone would not guarantee that we return of context is done, it could potentially
|
||||||
|
// push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
|
||||||
|
select { |
||||||
|
case f.offerC <- source: |
||||||
|
case <-ctx.Done(): |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Request is called when an upstream peer request the chunk as part of `RetrieveRequestMsg`, or from a local request through FileStore, and the node does not have the chunk locally.
func (f *Fetcher) Request(ctx context.Context) {
	// First we need to have this select to make sure that we return if context is done
	select {
	case <-ctx.Done():
		return
	default:
	}

	// This select alone would not guarantee that we return if context is done, it could potentially
	// push to requestC instead if requestC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
	select {
	case f.requestC <- struct{}{}:
	case <-ctx.Done():
	}
}
||||||
|
|
||||||
|
// run prepares the Fetcher
// it keeps the Fetcher alive within the lifecycle of the passed context
func (f *Fetcher) run(ctx context.Context, peers *sync.Map) {
	var (
		doRequest bool               // determines if retrieval is initiated in the current iteration
		wait      *time.Timer        // timer for search timeout
		waitC     <-chan time.Time   // timer channel (nil until the first request, so the case never fires early)
		sources   []*discover.NodeID // known sources, ie. peers that offered the chunk
		requested bool               // true if the chunk was actually requested
	)
	gone := make(chan *discover.NodeID) // channel to signal that a peer we requested from disconnected

	// loop that keeps the fetching process alive
	// after every request a timer is set. If this goes off we request again from another peer
	// note that the previous request is still alive and has the chance to deliver, so
	// rerequesting extends the search. ie.,
	// if a peer we requested from is gone we issue a new request, so the number of active
	// requests never decreases
	for {
		select {

		// incoming offer
		case source := <-f.offerC:
			log.Trace("new source", "peer addr", source, "request addr", f.addr)
			// 1) the chunk is offered by a syncing peer
			// add to known sources
			sources = append(sources, source)
			// launch a request to the source iff the chunk was requested (not just expected because its offered by a syncing peer)
			doRequest = requested

		// incoming request
		case <-f.requestC:
			log.Trace("new request", "request addr", f.addr)
			// 2) chunk is requested, set requested flag
			// launch a request iff none been launched yet
			doRequest = !requested
			requested = true

		// peer we requested from is gone. fall back to another
		// and remove the peer from the peers map
		case id := <-gone:
			log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr)
			peers.Delete(id.String())
			doRequest = requested

		// search timeout: too much time passed since the last request,
		// extend the search to a new peer if we can find one
		case <-waitC:
			log.Trace("search timed out: rerequesting", "request addr", f.addr)
			doRequest = requested

		// all Fetcher context closed, can quit
		case <-ctx.Done():
			log.Trace("terminate fetcher", "request addr", f.addr)
			// TODO: send cancelations to all peers left over in peers map (i.e., those we requested from)
			return
		}

		// need to issue a new request
		if doRequest {
			var err error
			sources, err = f.doRequest(ctx, gone, peers, sources)
			if err != nil {
				log.Warn("unable to request", "request addr", f.addr, "err", err)
			}
		}

		// if wait channel is not set, set it to a timer
		if requested {
			if wait == nil {
				wait = time.NewTimer(searchTimeout)
				// the timer is created at most once per run, so this defer fires once at return
				defer wait.Stop()
				waitC = wait.C
			} else {
				// stop the timer and drain the channel if it was not drained earlier
				if !wait.Stop() {
					select {
					case <-wait.C:
					default:
					}
				}
				// reset the timer to go off after searchTimeout
				wait.Reset(searchTimeout)
			}
		}
		doRequest = false
	}
}
||||||
|
|
||||||
|
// doRequest attempts at finding a peer to request the chunk from
// * first it tries to request explicitly from peers that are known to have offered the chunk
// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address
//   excluding those in the peersToSkip map
// * if no such peer is found an error is returned
//
// if a request is successful,
// * the peer's address is added to the set of peers to skip
// * the peer's address is removed from prospective sources, and
// * a go routine is started that reports on the gone channel if the peer is disconnected (or terminated their streamer)
func (f *Fetcher) doRequest(ctx context.Context, gone chan *discover.NodeID, peersToSkip *sync.Map, sources []*discover.NodeID) ([]*discover.NodeID, error) {
	var i int
	var sourceID *discover.NodeID
	var quit chan struct{}

	req := &Request{
		Addr:        f.addr,
		SkipCheck:   f.skipCheck,
		peersToSkip: peersToSkip,
	}

	foundSource := false
	// iterate over known sources
	for i = 0; i < len(sources); i++ {
		req.Source = sources[i]
		var err error
		sourceID, quit, err = f.protoRequestFunc(ctx, req)
		if err == nil {
			// remove the peer from known sources
			// Note: we can modify the source although we are looping on it, because we break from the loop immediately
			sources = append(sources[:i], sources[i+1:]...)
			foundSource = true
			break
		}
	}

	// if there are no known sources, or none available, we try request from a closest node
	if !foundSource {
		req.Source = nil
		var err error
		sourceID, quit, err = f.protoRequestFunc(ctx, req)
		if err != nil {
			// if no peers found to request from
			return sources, err
		}
	}
	// add peer to the set of peers to skip from now
	peersToSkip.Store(sourceID.String(), time.Now())

	// if the quit channel is closed, it indicates that the source peer we requested from
	// disconnected or terminated its streamer
	// here start a go routine that watches this channel and reports the source peer on the gone channel
	// this go routine quits if the fetcher global context is done to prevent process leak
	go func() {
		select {
		case <-quit:
			gone <- sourceID
		case <-ctx.Done():
		}
	}()
	return sources, nil
}
@ -0,0 +1,459 @@ |
|||||||
|
// Copyright 2018 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package network |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"sync" |
||||||
|
"testing" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/p2p/discover" |
||||||
|
) |
||||||
|
|
||||||
|
// requestedPeerID is the node ID the mock requester reports when a request has no explicit source.
var requestedPeerID = discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")

// sourcePeerID is the node ID used as the offering source peer in tests.
var sourcePeerID = discover.MustHexID("2dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")
||||||
|
|
||||||
|
// mockRequester pushes every request to the requestC channel when its doRequest function is called
type mockRequester struct {
	// requests []Request
	requestC  chan *Request   // when a request is coming it is pushed to requestC
	waitTimes []time.Duration // with waitTimes[i] you can define how much to wait on the ith request (optional)
	ctr       int             // counts the number of requests
	quitC     chan struct{}   // returned as the quit channel of every request; closing it simulates the peer going away
}
||||||
|
|
||||||
|
func newMockRequester(waitTimes ...time.Duration) *mockRequester { |
||||||
|
return &mockRequester{ |
||||||
|
requestC: make(chan *Request), |
||||||
|
waitTimes: waitTimes, |
||||||
|
quitC: make(chan struct{}), |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// doRequest records the request on requestC after an optional per-request delay,
// then returns the request's source (or the global requestedPeerID) and the shared quit channel.
func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*discover.NodeID, chan struct{}, error) {
	// delay before handing the request over, if a wait time was configured for this call
	waitTime := time.Duration(0)
	if m.ctr < len(m.waitTimes) {
		waitTime = m.waitTimes[m.ctr]
		m.ctr++
	}
	time.Sleep(waitTime)
	m.requestC <- request

	// if there is a Source in the request use that, if not use the global requestedPeerId
	source := request.Source
	if source == nil {
		source = &requestedPeerID
	}
	return source, m.quitC, nil
}
||||||
|
|
||||||
|
// TestFetcherSingleRequest creates a Fetcher using mockRequester, and run it with a sample set of peers to skip.
// mockRequester pushes a Request on a channel every time the request function is called. Using
// this channel we test if calling Fetcher.Request calls the request function, and whether it uses
// the correct peers to skip which we provided for the fetcher.run function.
func TestFetcherSingleRequest(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peers := []string{"a", "b", "c", "d"}
	peersToSkip := &sync.Map{}
	for _, p := range peers {
		peersToSkip.Store(p, time.Now())
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go fetcher.run(ctx, peersToSkip)

	rctx := context.Background()
	fetcher.Request(rctx)

	select {
	case request := <-requester.requestC:
		// request should contain all peers from peersToSkip provided to the fetcher
		for _, p := range peers {
			if _, ok := request.peersToSkip.Load(p); !ok {
				t.Fatalf("request.peersToSkip misses peer")
			}
		}

		// source peer should be also added to peersToSkip eventually
		// (doRequest stores it asynchronously relative to this receive)
		time.Sleep(100 * time.Millisecond)
		if _, ok := request.peersToSkip.Load(requestedPeerID.String()); !ok {
			t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
		}

	// fetch should trigger a request, if it doesn't happen in time, test should fail
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetch timeout")
	}
}
||||||
|
|
||||||
|
// TestFetcherCancelStopsFetcher tests that a cancelled fetcher does not initiate further requests even if its fetch function is called
func TestFetcherCancelStopsFetcher(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	ctx, cancel := context.WithCancel(context.Background())

	// we start the fetcher, and then we immediately cancel the context
	go fetcher.run(ctx, peersToSkip)
	cancel()

	rctx, rcancel := context.WithTimeout(ctx, 100*time.Millisecond)
	defer rcancel()
	// we call Request with an active context
	fetcher.Request(rctx)

	// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
	select {
	case <-requester.requestC:
		t.Fatalf("cancelled fetcher initiated request")
	case <-time.After(200 * time.Millisecond):
	}
}
||||||
|
|
||||||
|
// TestFetcherCancelStopsRequest tests that calling a Request function with a cancelled context does not initiate a request
func TestFetcherCancelStopsRequest(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// we start the fetcher with an active context
	go fetcher.run(ctx, peersToSkip)

	rctx, rcancel := context.WithCancel(context.Background())
	rcancel()

	// we call Request with a cancelled context
	fetcher.Request(rctx)

	// fetcher should not initiate request, we can only check by waiting a bit and making sure no request is happening
	select {
	case <-requester.requestC:
		t.Fatalf("cancelled fetch function initiated request")
	case <-time.After(200 * time.Millisecond):
	}

	// if there is another Request with active context, there should be a request, because the fetcher itself is not cancelled
	rctx = context.Background()
	fetcher.Request(rctx)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("expected request")
	}
}
||||||
|
|
||||||
|
// TestFetcherOfferUsesSource tests Fetcher Offer behavior.
// In this case there should be 1 (and only one) request initiated from the source peer, and the
// source nodeid should appear in the peersToSkip map.
func TestFetcherOfferUsesSource(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// start the fetcher
	go fetcher.run(ctx, peersToSkip)

	rctx := context.Background()
	// call the Offer function with the source peer
	fetcher.Offer(rctx, &sourcePeerID)

	// fetcher should not initiate request (an offer alone is not a request)
	select {
	case <-requester.requestC:
		t.Fatalf("fetcher initiated request")
	case <-time.After(200 * time.Millisecond):
	}

	// call Request after the Offer
	rctx = context.Background()
	fetcher.Request(rctx)

	// there should be exactly 1 request coming from fetcher
	var request *Request
	select {
	case request = <-requester.requestC:
		if *request.Source != sourcePeerID {
			t.Fatalf("Expected source id %v got %v", sourcePeerID, request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	select {
	case <-requester.requestC:
		t.Fatalf("Fetcher number of requests expected 1 got 2")
	case <-time.After(200 * time.Millisecond):
	}

	// source peer should be added to peersToSkip eventually
	time.Sleep(100 * time.Millisecond)
	if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
		t.Fatalf("SourcePeerId not added to peersToSkip")
	}
}
||||||
|
|
||||||
|
// TestFetcherOfferAfterRequestUsesSourceFromContext tests that when a chunk is
// first requested (request with nil source) and only afterwards offered by a
// source peer, a second request is issued explicitly to that source, and the
// source ends up in peersToSkip.
func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// start the fetcher
	go fetcher.run(ctx, peersToSkip)

	// call Request first
	rctx := context.Background()
	fetcher.Request(rctx)

	// there should be a request coming from fetcher
	var request *Request
	select {
	case request = <-requester.requestC:
		if request.Source != nil {
			t.Fatalf("Incorrect source peer id, expected nil got %v", request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	// after the Request call Offer
	fetcher.Offer(context.Background(), &sourcePeerID)

	// there should be a request coming from fetcher
	select {
	case request = <-requester.requestC:
		if *request.Source != sourcePeerID {
			t.Fatalf("Incorrect source peer id, expected %v got %v", sourcePeerID, request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	// source peer should be added to peersToSkip eventually
	time.Sleep(100 * time.Millisecond)
	if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
		t.Fatalf("SourcePeerId not added to peersToSkip")
	}
}
||||||
|
|
||||||
|
// TestFetcherRetryOnTimeout tests that fetch retries after searchTimeout has passed
func TestFetcherRetryOnTimeout(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	// set searchTimeout to low value so the test is quicker, restore it afterwards
	defer func(t time.Duration) {
		searchTimeout = t
	}(searchTimeout)
	searchTimeout = 250 * time.Millisecond

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// start the fetcher
	go fetcher.run(ctx, peersToSkip)

	// call the fetch function with an active context
	rctx := context.Background()
	fetcher.Request(rctx)

	// after 100ms the first request should be initiated
	time.Sleep(100 * time.Millisecond)

	select {
	case <-requester.requestC:
	default:
		t.Fatalf("fetch did not initiate request")
	}

	// after another 100ms no new request should be initiated, because search timeout is 250ms
	time.Sleep(100 * time.Millisecond)

	select {
	case <-requester.requestC:
		t.Fatalf("unexpected request from fetcher")
	default:
	}

	// after another 300ms search timeout is over, there should be a new request
	time.Sleep(300 * time.Millisecond)

	select {
	case <-requester.requestC:
	default:
		t.Fatalf("fetch did not retry request")
	}
}
||||||
|
|
||||||
|
// TestFetcherFactory creates a FetcherFactory and checks if the factory really creates and starts
// a Fetcher when it return a fetch function. We test the fetching functionality just by checking if
// a request is initiated when the fetch function is called
func TestFetcherFactory(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)
	fetcherFactory := NewFetcherFactory(requester.doRequest, false)

	peersToSkip := &sync.Map{}

	fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)

	fetcher.Request(context.Background())

	// check if the created fetchFunction really starts a fetcher and initiates a request
	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetch timeout")
	}

}
||||||
|
|
||||||
|
// TestFetcherRequestQuitRetriesRequest tests that when the peer a request was
// sent to goes away (its quit channel is closed), the fetcher falls back and
// issues a new request.
func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)
	fetcher := NewFetcher(addr, requester.doRequest, true)

	// make sure searchTimeout is long so it is sure the request is not retried because of timeout
	defer func(t time.Duration) {
		searchTimeout = t
	}(searchTimeout)
	searchTimeout = 10 * time.Second

	peersToSkip := &sync.Map{}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go fetcher.run(ctx, peersToSkip)

	rctx := context.Background()
	fetcher.Request(rctx)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("request is not initiated")
	}

	// simulate the requested peer going away
	close(requester.quitC)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("request is not initiated after failed request")
	}
}
||||||
|
|
||||||
|
// TestRequestSkipPeer checks if PeerSkip function will skip provided peer
|
||||||
|
// and not skip unknown one.
|
||||||
|
func TestRequestSkipPeer(t *testing.T) { |
||||||
|
addr := make([]byte, 32) |
||||||
|
peers := []discover.NodeID{ |
||||||
|
discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), |
||||||
|
discover.MustHexID("2dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), |
||||||
|
} |
||||||
|
|
||||||
|
peersToSkip := new(sync.Map) |
||||||
|
peersToSkip.Store(peers[0].String(), time.Now()) |
||||||
|
r := NewRequest(addr, false, peersToSkip) |
||||||
|
|
||||||
|
if !r.SkipPeer(peers[0].String()) { |
||||||
|
t.Errorf("peer not skipped") |
||||||
|
} |
||||||
|
|
||||||
|
if r.SkipPeer(peers[1].String()) { |
||||||
|
t.Errorf("peer skipped") |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// TestRequestSkipPeerExpired checks if a peer to skip is not skipped
// after RequestTimeout has passed.
func TestRequestSkipPeerExpired(t *testing.T) {
	addr := make([]byte, 32)
	peer := discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439")

	// set RequestTimeout to a low value and reset it after the test
	defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
	RequestTimeout = 250 * time.Millisecond

	peersToSkip := new(sync.Map)
	peersToSkip.Store(peer.String(), time.Now())
	r := NewRequest(addr, false, peersToSkip)

	// within the timeout window the peer is skipped
	if !r.SkipPeer(peer.String()) {
		t.Errorf("peer not skipped")
	}

	time.Sleep(500 * time.Millisecond)

	// after the timeout has passed the entry has expired
	if r.SkipPeer(peer.String()) {
		t.Errorf("peer skipped")
	}
}
||||||
|
|
||||||
|
// TestRequestSkipPeerPermanent checks if a peer to skip is not skipped
|
||||||
|
// after RequestTimeout is not skipped if it is set for a permanent skipping
|
||||||
|
// by value to peersToSkip map is not time.Duration.
|
||||||
|
func TestRequestSkipPeerPermanent(t *testing.T) { |
||||||
|
addr := make([]byte, 32) |
||||||
|
peer := discover.MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439") |
||||||
|
|
||||||
|
// set RequestTimeout to a low value and reset it after the test
|
||||||
|
defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout) |
||||||
|
RequestTimeout = 250 * time.Millisecond |
||||||
|
|
||||||
|
peersToSkip := new(sync.Map) |
||||||
|
peersToSkip.Store(peer.String(), true) |
||||||
|
r := NewRequest(addr, false, peersToSkip) |
||||||
|
|
||||||
|
if !r.SkipPeer(peer.String()) { |
||||||
|
t.Errorf("peer not skipped") |
||||||
|
} |
||||||
|
|
||||||
|
time.Sleep(500 * time.Millisecond) |
||||||
|
|
||||||
|
if !r.SkipPeer(peer.String()) { |
||||||
|
t.Errorf("peer not skipped") |
||||||
|
} |
||||||
|
} |
@ -1,69 +0,0 @@ |
|||||||
// Copyright 2016 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package storage |
|
||||||
|
|
||||||
import ( |
|
||||||
"context" |
|
||||||
"sync" |
|
||||||
) |
|
||||||
|
|
||||||
/*
ChunkStore interface is implemented by :

- MemStore: a memory cache
- DbStore: local disk/db store
- LocalStore: a combination (sequence of) memStore and dbStore
- NetStore: cloud storage abstraction layer
- FakeChunkStore: dummy store which doesn't store anything just implements the interface
*/
type ChunkStore interface {
	// Put stores the chunk. It has no error return value; storage errors are
	// surfaced asynchronously through the chunk itself (see Chunk's stored
	// channel and GetErrored as used by PutChunks).
	Put(context.Context, *Chunk)
	// Get returns the chunk for the given address, or an error when it is
	// not available.
	Get(context.Context, Address) (*Chunk, error)
	// Close releases any resources held by the store.
	Close()
}
|
||||||
|
|
||||||
// MapChunkStore is a very simple ChunkStore implementation to store chunks in a map in memory.
type MapChunkStore struct {
	chunks map[string]*Chunk // chunks keyed by their hex-encoded address
	mu     sync.RWMutex      // guards chunks
}
|
||||||
|
|
||||||
func NewMapChunkStore() *MapChunkStore { |
|
||||||
return &MapChunkStore{ |
|
||||||
chunks: make(map[string]*Chunk), |
|
||||||
} |
|
||||||
} |
|
||||||
|
|
||||||
// Put stores the chunk in the map under its hex-encoded address and then
// marks it as stored. Both the map write and the markAsStored call happen
// under the write lock, so a concurrent Get observes the chunk only after
// it is fully recorded.
func (m *MapChunkStore) Put(ctx context.Context, chunk *Chunk) {
	m.mu.Lock()
	defer m.mu.Unlock()
	// A chunk with the same address overwrites the previous entry.
	m.chunks[chunk.Addr.Hex()] = chunk
	chunk.markAsStored()
}
|
||||||
|
|
||||||
func (m *MapChunkStore) Get(ctx context.Context, addr Address) (*Chunk, error) { |
|
||||||
m.mu.RLock() |
|
||||||
defer m.mu.RUnlock() |
|
||||||
chunk := m.chunks[addr.Hex()] |
|
||||||
if chunk == nil { |
|
||||||
return nil, ErrChunkNotFound |
|
||||||
} |
|
||||||
return chunk, nil |
|
||||||
} |
|
||||||
|
|
||||||
// Close implements ChunkStore. It is a no-op: MapChunkStore holds no
// resources that need releasing.
func (m *MapChunkStore) Close() {
}
|
@ -1,44 +0,0 @@ |
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
package storage |
|
||||||
|
|
||||||
import ( |
|
||||||
"context" |
|
||||||
"sync" |
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/swarm/log" |
|
||||||
) |
|
||||||
|
|
||||||
// PutChunks adds chunks to localstore
|
|
||||||
// It waits for receive on the stored channel
|
|
||||||
// It logs but does not fail on delivery error
|
|
||||||
func PutChunks(store *LocalStore, chunks ...*Chunk) { |
|
||||||
wg := sync.WaitGroup{} |
|
||||||
wg.Add(len(chunks)) |
|
||||||
go func() { |
|
||||||
for _, c := range chunks { |
|
||||||
<-c.dbStoredC |
|
||||||
if err := c.GetErrored(); err != nil { |
|
||||||
log.Error("chunk store fail", "err", err, "key", c.Addr) |
|
||||||
} |
|
||||||
wg.Done() |
|
||||||
} |
|
||||||
}() |
|
||||||
for _, c := range chunks { |
|
||||||
go store.Put(context.TODO(), c) |
|
||||||
} |
|
||||||
wg.Wait() |
|
||||||
} |
|
@ -1,54 +0,0 @@ |
|||||||
// Copyright 2018 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package storage |
|
||||||
|
|
||||||
import "context" |
|
||||||
|
|
||||||
// DBAPI is a wrapper of db-s to provide mockable custom local chunk store
// access to the syncer.
type DBAPI struct {
	db  *LDBStore   // underlying chunk database; used for iteration and storage counters
	loc *LocalStore // local store; used for chunk get/put and request creation
}
|
||||||
|
|
||||||
func NewDBAPI(loc *LocalStore) *DBAPI { |
|
||||||
return &DBAPI{loc.DbStore, loc} |
|
||||||
} |
|
||||||
|
|
||||||
// Get obtains the chunk for the given address, delegating to the local
// store's Get.
func (d *DBAPI) Get(ctx context.Context, addr Address) (*Chunk, error) {
	return d.loc.Get(ctx, addr)
}
|
||||||
|
|
||||||
// CurrentBucketStorageIndex returns the current storage counter of the chunk
// db for the given proximity order, delegating to the underlying LDBStore.
func (d *DBAPI) CurrentBucketStorageIndex(po uint8) uint64 {
	return d.db.CurrentBucketStorageIndex(po)
}
|
||||||
|
|
||||||
// Iterator iterates db entries between the from and to storage counters for
// the given proximity order, calling f for each entry. It delegates to the
// underlying LDBStore's SyncIterator.
func (d *DBAPI) Iterator(from uint64, to uint64, po uint8, f func(Address, uint64) bool) error {
	return d.db.SyncIterator(from, to, po, f)
}
|
||||||
|
|
||||||
// GetOrCreateRequest obtains the chunk for the given address or creates a
// request db entry for it, delegating to the local store.
// NOTE(review): the bool result is passed straight through from
// LocalStore.GetOrCreateRequest — presumably it reports whether a new
// request was created; confirm at the callee.
func (d *DBAPI) GetOrCreateRequest(ctx context.Context, addr Address) (*Chunk, bool) {
	return d.loc.GetOrCreateRequest(ctx, addr)
}
|
||||||
|
|
||||||
// Put stores the chunk, delegating to the local store's Put.
func (d *DBAPI) Put(ctx context.Context, chunk *Chunk) {
	d.loc.Put(ctx, chunk)
}
|
Loading…
Reference in new issue