@@ -23,22 +23,20 @@ import (
"io/ioutil"
"math/rand"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/pborman/uuid"
cli "gopkg.in/urfave/cli.v1"
)
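// uploadAndSyncCmd drives the upload-and-sync smoke test: it seeds the random test data, runs uploadAndSync, and triggers trackChunks on the uploaded data as a debug aid.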
func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
func uploadAndSyncCmd(ctx *cli.Context) error {
// use input seed if it has been set
if inputSeed != 0 {
seed = inputSeed
@@ -49,7 +47,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
errc := make(chan error)
go func() {
errc <- uploadAndSync(ctx, randomBytes, tuid)
errc <- uploadAndSync(ctx, randomBytes)
}()
var err error
@@ -65,7 +63,7 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
}
// trigger debug functionality on randomBytes
e := trackChunks(randomBytes[:])
e := trackChunks(randomBytes[:], true)
if e != nil {
log.Error(e.Error())
}
@@ -73,50 +71,84 @@ func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
return err
}
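// trackChunks asks every host over RPC (bzz_has) which of the uploaded chunk references it stores; with submitMetrics set, the aggregated yes/no counts are submitted as metrics.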
func trackChunks(testData []byte) error {
func trackChunks(testData []byte, submitMetrics bool) error {
addrs, err := getAllRefs(testData)
if err != nil {
return err
}
for i, ref := range addrs {
log.Trace(fmt.Sprintf("ref %d", i), "ref", ref)
log.Debug(fmt.Sprintf("ref %d", i), "ref", ref)
}
var globalYes, globalNo int
var globalMu sync.Mutex
var hasErr bool
var wg sync.WaitGroup
wg.Add(len(hosts))
for _, host := range hosts {
httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
host := host
go func() {
defer wg.Done()
httpHost := fmt.Sprintf("ws://%s:%d", host, 8546)
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
rpcClient, err := rpc.DialContext(ctx, httpHost)
if rpcClient != nil {
defer rpcClient.Close()
}
if err != nil {
log . Error ( "error dialing host" , "err" , err , "host" , httpHost )
hasErr = true
return
}
hostChunks := []string{}
var hostChunks string
err = rpcClient.Call(&hostChunks, "bzz_has", addrs)
if err != nil {
log.Error("error calling rpc client", "err", err, "host", httpHost)
hasErr = true
return
}
rpcClient, err := rpc.Dial(httpHost)
if err != nil {
log.Error("error dialing host", "err", err, "host", httpHost)
continue
}
yes, no := 0, 0
for _, val := range hostChunks {
if val == '1' {
yes++
} else {
no++
}
}
var hasInfo []api.HasInfo
err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
if err != nil {
log.Error("error calling rpc client", "err", err, "host", httpHost)
continue
}
if no == 0 {
log . Info ( "host reported to have all chunks" , "host" , host )
}
count := 0
for _, info := range hasInfo {
if info.Has {
hostChunks = append(hostChunks, "1")
} else {
hostChunks = append(hostChunks, "0")
count++
log.Debug("chunks", "chunks", hostChunks, "yes", yes, "no", no, "host", host)
if submitMetrics {
globalMu.Lock()
globalYes += yes
globalNo += no
globalMu.Unlock()
}
}
}()
}
if count == 0 {
log . Info ( "host reported to have all chunks" , "host" , host )
}
wg.Wait()
if !hasErr && submitMetrics {
// remove the chunks stored on the uploader node
globalYes -= len(addrs)
log.Trace("chunks", "chunks", strings.Join(hostChunks, ""), "host", host)
metrics.GetOrRegisterCounter("deployment.chunks.yes", nil).Inc(int64(globalYes))
metrics.GetOrRegisterCounter("deployment.chunks.no", nil).Inc(int64(globalNo))
metrics.GetOrRegisterCounter("deployment.chunks.refs", nil).Inc(int64(len(addrs)))
}
return nil
}
@@ -130,15 +162,13 @@ func getAllRefs(testData []byte) (storage.AddressCollection, error) {
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
defer cancel()
reader := bytes.NewReader(testData)
return fileStore.GetAllReferences(ctx, reader, false)
return fileStore.GetAllReferences(context.Background(), reader, false)
}
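// uploadAndSync uploads random data to the first host, waits for the cluster to sync, and then fetches the file from another node until the fetch succeeds.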
func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)
func uploadAndSync(c *cli.Context, randomBytes []byte) error {
log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)
t1 := time.Now()
hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
@@ -155,53 +185,91 @@ func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
return err
}
log . Info ( "uploaded successfully" , "tuid" , tuid , " hash" , hash , "took" , t2 , "digest" , fmt . Sprintf ( "%x" , fhash ) )
log . Info ( "uploaded successfully" , "hash" , hash , "took" , t2 , "digest" , fmt . Sprintf ( "%x" , fhash ) )
time . Sleep ( time . Duration ( syncDelay ) * time . Second )
waitToSync ( )
wg := sync . WaitGroup { }
if single {
randIndex := 1 + rand . Intn ( len ( hosts ) - 1 )
ruid := uuid . New ( ) [ : 8 ]
wg . Add ( 1 )
go func ( endpoint string , ruid string ) {
for {
start := time . Now ( )
err := fetch ( hash , endpoint , fhash , ruid , tuid )
if err != nil {
continue
}
ended := time.Since(start)
log.Debug("chunks before fetch attempt", "hash", hash)
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
wg.Done()
return
}
}(httpEndpoint(hosts[randIndex]), ruid)
} else {
for _, endpoint := range hosts[1:] {
ruid := uuid.New()[:8]
wg.Add(1)
go func(endpoint string, ruid string) {
for {
start := time.Now()
err := fetch(hash, endpoint, fhash, ruid, tuid)
if err != nil {
continue
}
ended := time.Since(start)
metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
wg.Done()
return
}
}(httpEndpoint(endpoint), ruid)
err = trackChunks(randomBytes, false)
if err != nil {
log.Error(err.Error())
}
if onlyUpload {
log . Debug ( "only-upload is true, stoppping test" , "hash" , hash )
return nil
}
randIndex := 1 + rand.Intn(len(hosts)-1)
for {
start := time.Now()
err := fetch(hash, httpEndpoint(hosts[randIndex]), fhash, "")
if err != nil {
time.Sleep(2 * time.Second)
continue
}
ended := time.Since(start)
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
log.Info("fetch successful", "took", ended, "endpoint", httpEndpoint(hosts[randIndex]))
break
}
wg.Wait()
log.Info("all hosts synced random file successfully")
return nil
}
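// isSyncing reports, via the bzz_isSyncing RPC call, whether the node behind the given websocket endpoint is still syncing.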
func isSyncing(wsHost string) (bool, error) {
rpcClient, err := rpc.Dial(wsHost)
if rpcClient != nil {
defer rpcClient.Close()
}
if err != nil {
log . Error ( "error dialing host" , "err" , err )
return false , err
}
var isSyncing bool
err = rpcClient.Call(&isSyncing, "bzz_isSyncing")
if err != nil {
log.Error("error calling host for isSyncing", "err", err)
return false, err
}
log.Debug("isSyncing result", "host", wsHost, "isSyncing", isSyncing)
return isSyncing, nil
}
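// waitToSync polls all hosts every few seconds until none of them report that they are still syncing, and records the total wait time as a metric.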
func waitToSync() {
t1 := time.Now()
ns := uint64(1)
for ns > 0 {
time.Sleep(3 * time.Second)
notSynced := uint64(0)
var wg sync.WaitGroup
wg.Add(len(hosts))
for i := 0; i < len(hosts); i++ {
i := i
go func(idx int) {
stillSyncing, err := isSyncing(wsEndpoint(hosts[idx]))
if stillSyncing || err != nil {
atomic.AddUint64(&notSynced, 1)
}
wg.Done()
}(i)
}
wg.Wait()
ns = atomic.LoadUint64(&notSynced)
}
t2 := time.Since(t1)
metrics.GetOrRegisterResettingTimer("upload-and-sync.single.wait-for-sync.deployment", nil).Update(t2)
}