@@ -37,25 +37,79 @@ import (
)
var (
	testdb, _   = ethdb.NewMemDatabase()
	testKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testAddress = crypto.PubkeyToAddress(testKey.PublicKey)
	genesis     = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
)
// Reduce some of the parameters to make the tester faster.
func init() {
	MaxForkAncestry = uint64(10000)
	blockCacheLimit = 1024
	fsCriticalTrials = 10
}
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	downloader *Downloader
	genesis *types.Block   // Genesis blocks used by the tester and peers
	stateDb ethdb.Database // Database used by the tester for syncing from peers
	peerDb  ethdb.Database // Database of the peers containing all data
	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
	peerHashes        map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders       map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks        map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts      map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds      map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
	peerMissingStates map[string]map[common.Hash]bool           // State entries that fast sync should not return
	lock sync.RWMutex
}
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	testdb, _ := ethdb.NewMemDatabase()
	genesis := core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))
	tester := &downloadTester{
		genesis:           genesis,
		peerDb:            testdb,
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
	tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
		tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd,
		tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer)
	return tester
}
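For orientation, here is a minimal usage sketch of the mocker. The test name is hypothetical, but the calls mirror how newTester, makeChain, newPeer, sync and terminate are used by the tests further down in this file:

// Hypothetical sketch, not part of the patch: driving the tester end to end.
func testTesterUsageSketch(t *testing.T, protocol int) {
	tester := newTester()
	defer tester.terminate() // release the embedded downloader's resources
	// Build a short chain rooted at the tester's own genesis and serve it from one peer.
	hashes, headers, blocks, receipts := tester.makeChain(128, 0, tester.genesis, nil, false)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
	// A full sync against that peer should import every block into the tester's own chain.
	if err := tester.sync("peer", nil, FullSync); err != nil {
		t.Fatalf("failed to synchronise blocks: %v", err)
	}
}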
// makeChain creates a chain of n blocks starting at and including parent.
// The returned hash chain is ordered head->parent. In addition, every 3rd block
// contains a transaction and every 5th an uncle to allow testing correct block
// reassembly.
func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
func (dl *downloadTester) makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Receipts, heavy bool) ([]common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]types.Receipts) {
	// Generate the block chain
	blocks, receipts := core.GenerateChain(nil, parent, testdb, n, func(i int, block *core.BlockGen) {
	blocks, receipts := core.GenerateChain(nil, parent, dl.peerDb, n, func(i int, block *core.BlockGen) {
		block.SetCoinbase(common.Address{seed})
		// If a heavy chain is requested, delay blocks to raise difficulty
@@ -63,7 +117,7 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei
			block.OffsetTime(-1)
		}
		// If the block number is a multiple of 3, send a bonus transaction to the miner
		if parent == genesis && i%3 == 0 {
		if parent == dl.genesis && i%3 == 0 {
			tx, err := types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil).SignECDSA(testKey)
			if err != nil {
				panic(err)
@@ -102,19 +156,19 @@ func makeChain(n int, seed byte, parent *types.Block, parentReceipts types.Recei
// makeChainFork creates two chains of length n, such that h1[:f] and
// h2[:f] are different but have a common suffix of length n-f.
func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
func (dl *downloadTester) makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts, balanced bool) ([]common.Hash, []common.Hash, map[common.Hash]*types.Header, map[common.Hash]*types.Header, map[common.Hash]*types.Block, map[common.Hash]*types.Block, map[common.Hash]types.Receipts, map[common.Hash]types.Receipts) {
	// Create the common suffix
	hashes, headers, blocks, receipts := makeChain(n-f, 0, parent, parentReceipts, false)
	hashes, headers, blocks, receipts := dl.makeChain(n-f, 0, parent, parentReceipts, false)
	// Create the forks, making the second heavier if non-balanced forks were requested
	hashes1, headers1, blocks1, receipts1 := makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1, headers1, blocks1, receipts1 := dl.makeChain(f, 1, blocks[hashes[0]], receipts[hashes[0]], false)
	hashes1 = append(hashes1, hashes[1:]...)
	heavy := false
	if !balanced {
		heavy = true
	}
	hashes2, headers2, blocks2, receipts2 := makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2, headers2, blocks2, receipts2 := dl.makeChain(f, 2, blocks[hashes[0]], receipts[hashes[0]], heavy)
	hashes2 = append(hashes2, hashes[1:]...)
	for hash, header := range headers {
@@ -132,53 +186,6 @@ func makeChainFork(n, f int, parent *types.Block, parentReceipts types.Receipts,
	return hashes1, hashes2, headers1, headers2, blocks1, blocks2, receipts1, receipts2
}
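To make the head->parent ordering and the fork layout concrete, here is a small hypothetical sketch of how these helpers are typically consumed; the chain lengths are invented for illustration and are not taken from the patch:

// Hypothetical sketch, not part of the patch: hashes[0] is the head, the last entry is the parent's hash.
func sketchChainHelpers(t *testing.T) {
	tester := newTester()
	defer tester.terminate()
	// A 32-block chain on top of the tester's genesis; every 3rd block carries a transaction,
	// every 5th an uncle, so block and receipt reassembly paths all get exercised.
	hashes, _, blocks, _ := tester.makeChain(32, 0, tester.genesis, nil, false)
	if head := blocks[hashes[0]]; head.NumberU64() != 32 {
		t.Fatalf("unexpected head height: have %d, want 32", head.NumberU64())
	}
	// Two 32-block forks that diverge in their newest 8 blocks and share the remaining 24;
	// passing balanced == false would make the second fork the heavier one.
	hashesA, hashesB, _, _, _, _, _, _ := tester.makeChainFork(32, 8, tester.genesis, nil, true)
	if hashesA[len(hashesA)-1] != hashesB[len(hashesB)-1] {
		t.Fatalf("forks do not share a common ancestor")
	}
}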
// downloadTester is a test simulator for mocking out local block chain.
type downloadTester struct {
	stateDb    ethdb.Database
	downloader *Downloader
	ownHashes   []common.Hash                  // Hash chain belonging to the tester
	ownHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester
	ownBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester
	ownReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester
	ownChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain
	peerHashes        map[string][]common.Hash                  // Hash chain belonging to different test peers
	peerHeaders       map[string]map[common.Hash]*types.Header  // Headers belonging to different test peers
	peerBlocks        map[string]map[common.Hash]*types.Block   // Blocks belonging to different test peers
	peerReceipts      map[string]map[common.Hash]types.Receipts // Receipts belonging to different test peers
	peerChainTds      map[string]map[common.Hash]*big.Int       // Total difficulties of the blocks in the peer chains
	peerMissingStates map[string]map[common.Hash]bool           // State entries that fast sync should not return
	lock sync.RWMutex
}
// newTester creates a new downloader test mocker.
func newTester() *downloadTester {
	tester := &downloadTester{
		ownHashes:         []common.Hash{genesis.Hash()},
		ownHeaders:        map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},
		ownBlocks:         map[common.Hash]*types.Block{genesis.Hash(): genesis},
		ownReceipts:       map[common.Hash]types.Receipts{genesis.Hash(): nil},
		ownChainTd:        map[common.Hash]*big.Int{genesis.Hash(): genesis.Difficulty()},
		peerHashes:        make(map[string][]common.Hash),
		peerHeaders:       make(map[string]map[common.Hash]*types.Header),
		peerBlocks:        make(map[string]map[common.Hash]*types.Block),
		peerReceipts:      make(map[string]map[common.Hash]types.Receipts),
		peerChainTds:      make(map[string]map[common.Hash]*big.Int),
		peerMissingStates: make(map[string]map[common.Hash]bool),
	}
	tester.stateDb, _ = ethdb.NewMemDatabase()
	tester.stateDb.Put(genesis.Root().Bytes(), []byte{0x00})
	tester.downloader = New(tester.stateDb, new(event.TypeMux), tester.hasHeader, tester.hasBlock, tester.getHeader,
		tester.getBlock, tester.headHeader, tester.headBlock, tester.headFastBlock, tester.commitHeadBlock, tester.getTd,
		tester.insertHeaders, tester.insertBlocks, tester.insertReceipts, tester.rollback, tester.dropPeer)
	return tester
}
// terminate aborts any operations on the embedded downloader and releases all
// held resources.
func (dl *downloadTester) terminate() {
@@ -251,7 +258,7 @@ func (dl *downloadTester) headHeader() *types.Header {
			return header
		}
	}
	return genesis.Header()
	return dl.genesis.Header()
}
// headBlock retrieves the current head block from the canonical chain.
@@ -266,7 +273,7 @@ func (dl *downloadTester) headBlock() *types.Block {
			}
		}
	}
	return genesis
	return dl.genesis
}
// headFastBlock retrieves the current head fast-sync block from the canonical chain.
@@ -279,7 +286,7 @@ func (dl *downloadTester) headFastBlock() *types.Block {
			return block
		}
	}
	return genesis
	return dl.genesis
}
// commitHeadBlock manually sets the head block to a given hash.
@@ -351,7 +358,7 @@ func (dl *downloadTester) insertBlocks(blocks types.Blocks) (int, error) {
	return len(blocks), nil
}
// insertReceipts injects a new batch of blocks into the simulated chain.
// insertReceipts injects a new batch of receipts into the simulated chain.
func (dl *downloadTester) insertReceipts(blocks types.Blocks, receipts []types.Receipts) (int, error) {
	dl.lock.Lock()
	defer dl.lock.Unlock()
@@ -586,7 +593,7 @@ func (dl *downloadTester) peerGetNodeDataFn(id string, delay time.Duration) func
		results := make([][]byte, 0, len(hashes))
		for _, hash := range hashes {
			if data, err := testdb.Get(hash.Bytes()); err == nil {
			if data, err := dl.peerDb.Get(hash.Bytes()); err == nil {
				if !dl.peerMissingStates[id][hash] {
					results = append(results, data)
				}
@@ -669,13 +676,13 @@ func TestCanonicalSynchronisation64Light(t *testing.T) { testCanonicalSynchronis
func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
	// Synchronise with the peer and make sure all relevant data was retrieved
@@ -694,13 +701,13 @@ func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }
func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }
func testThrottling(t *testing.T, protocol int, mode SyncMode) {
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a long block chain to download and the tester
	targetBlocks := 8 * blockCacheLimit
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
	// Wrap the importer to allow stepping
@@ -782,13 +789,13 @@ func TestForkedSync64Light(t *testing.T) { testForkedSync(t, 64, LightSync) }
func testForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
	tester := newTester()
	defer tester.terminate()
	// Create a long enough forked chain
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
	tester.newPeer("fork A", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("fork B", protocol, hashesB, headersB, blocksB, receiptsB)
@@ -817,13 +824,13 @@ func TestHeavyForkedSync64Light(t *testing.T) { testHeavyForkedSync(t, 64, Light
func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a long enough forked chain
	common, fork := MaxHashFetch, 4*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
	tester.newPeer("light", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy", protocol, hashesB[fork/2:], headersB, blocksB, receiptsB)
@@ -853,13 +860,13 @@ func TestBoundedForkedSync64Light(t *testing.T) { testBoundedForkedSync(t, 64, L
func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
	tester := newTester()
	defer tester.terminate()
	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("rewriter", protocol, hashesB, headersB, blocksB, receiptsB)
@@ -888,13 +895,13 @@ func TestBoundedHeavyForkedSync64Light(t *testing.T) { testBoundedHeavyForkedSyn
func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a long enough forked chain
	common, fork := 13, int(MaxForkAncestry+17)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, false)
	tester.newPeer("original", protocol, hashesA, headersA, blocksA, receiptsA)
	tester.newPeer("heavy-rewriter", protocol, hashesB[MaxForkAncestry-17:], headersB, blocksB, receiptsB) // Root the fork below the ancestor limit
@@ -958,6 +965,9 @@ func TestCancel64Light(t *testing.T) { testCancel(t, 64, LightSync) }
func testCancel(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download and the tester
	targetBlocks := blockCacheLimit - 15
	if targetBlocks >= MaxHashFetch {
@@ -966,10 +976,7 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) {
	if targetBlocks >= MaxHeaderFetch {
		targetBlocks = MaxHeaderFetch - 15
	}
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
@@ -999,13 +1006,13 @@ func TestMultiSynchronisation64Light(t *testing.T) { testMultiSynchronisation(t,
func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create various peers with various parts of the chain
	targetPeers := 8
	targetBlocks := targetPeers*blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	for i := 0; i < targetPeers; i++ {
		id := fmt.Sprintf("peer #%d", i)
@@ -1029,14 +1036,14 @@ func TestMultiProtoSynchronisation64Light(t *testing.T) { testMultiProtoSync(t,
func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Create peers of every type
	tester := newTester()
	defer tester.terminate()
	tester.newPeer("peer 62", 62, hashes, headers, blocks, nil)
	tester.newPeer("peer 63", 63, hashes, headers, blocks, receipts)
	tester.newPeer("peer 64", 64, hashes, headers, blocks, receipts)
@@ -1068,13 +1075,13 @@ func TestEmptyShortCircuit64Light(t *testing.T) { testEmptyShortCircuit(t, 64, L
func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a block chain to download
	targetBlocks := 2*blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a block chain to download
	targetBlocks := 2*blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
	// Instrument the downloader to signal body requests
@@ -1094,7 +1101,7 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) {
	// Validate the number of block bodies that should have been requested
	bodiesNeeded, receiptsNeeded := 0, 0
	for _, block := range blocks {
		if mode != LightSync && block != genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
		if mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {
			bodiesNeeded++
		}
	}
@@ -1123,13 +1130,13 @@ func TestMissingHeaderAttack64Light(t *testing.T) { testMissingHeaderAttack(t, 6
func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Attempt a full sync with an attacker feeding gapped headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	missing := targetBlocks / 2
@@ -1156,13 +1163,13 @@ func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 6
func TestShiftedHeaderAttack64Light(t *testing.T) { testShiftedHeaderAttack(t, 64, LightSync) }
func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Attempt a full sync with an attacker feeding shifted headers
	tester.newPeer("attack", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["attack"], hashes[len(hashes)-2])
@@ -1188,13 +1195,13 @@ func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(
func TestInvalidHeaderRollback64Light(t *testing.T) { testInvalidHeaderRollback(t, 64, LightSync) }
func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) {
	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + fsMinFullBlocks
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := 3*fsHeaderSafetyNet + fsPivotInterval + fsMinFullBlocks
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Attempt to sync with an attacker that feeds junk during the fast sync phase.
	// This should result in the last fsHeaderSafetyNet headers being rolled back.
	tester.newPeer("fast-attack", protocol, hashes, headers, blocks, receipts)
@@ -1286,7 +1293,7 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) {
	tester := newTester()
	defer tester.terminate()
	hashes, headers, blocks, receipts := makeChain(0, 0, genesis, nil, false)
	hashes, headers, blocks, receipts := tester.makeChain(0, 0, tester.genesis, nil, false)
	tester.newPeer("attack", protocol, []common.Hash{hashes[0]}, headers, blocks, receipts)
	if err := tester.sync("attack", big.NewInt(1000000), mode); err != errStallingPeer {
@@ -1333,7 +1340,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
	for i, tt := range tests {
		// Register a new peer and ensure its presence
		id := fmt.Sprintf("test %d", i)
		if err := tester.newPeer(id, protocol, []common.Hash{genesis.Hash()}, nil, nil, nil); err != nil {
		if err := tester.newPeer(id, protocol, []common.Hash{tester.genesis.Hash()}, nil, nil, nil); err != nil {
			t.Fatalf("test %d: failed to register new peer: %v", i, err)
		}
		if _, ok := tester.peerHashes[id]; !ok {
@@ -1342,7 +1349,7 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) {
		// Simulate a synchronisation and check the required result
		tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }
		tester.downloader.Synchronise(id, genesis.Hash(), big.NewInt(1000), FullSync)
		tester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)
		if _, ok := tester.peerHashes[id]; !ok != tt.drop {
			t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop)
		}
@@ -1361,17 +1368,17 @@ func TestSyncProgress64Light(t *testing.T) { testSyncProgress(t, 64, LightSync)
func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester := newTester()
	defer tester.terminate()
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
@@ -1434,17 +1441,17 @@ func TestForkedSyncProgress64Light(t *testing.T) { testForkedSyncProgress(t, 64,
func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a forked chain to simulate origin reversal
	common, fork := MaxHashFetch, 2*MaxHashFetch
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := makeChainFork(common+fork, fork, genesis, nil, true)
	hashesA, hashesB, headersA, headersB, blocksA, blocksB, receiptsA, receiptsB := tester.makeChainFork(common+fork, fork, tester.genesis, nil, true)
	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester := newTester()
	defer tester.terminate()
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
@@ -1510,17 +1517,17 @@ func TestFailedSyncProgress64Light(t *testing.T) { testFailedSyncProgress(t, 64,
func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small enough block chain to download
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester := newTester()
	defer tester.terminate()
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
@@ -1587,17 +1594,17 @@ func TestFakedSyncProgress64Light(t *testing.T) { testFakedSyncProgress(t, 64, L
func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	tester := newTester()
	defer tester.terminate()
	// Create a small block chain
	targetBlocks := blockCacheLimit - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks+3, 0, genesis, nil, false)
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks+3, 0, tester.genesis, nil, false)
	// Set a sync init hook to catch progress changes
	starting := make(chan struct{})
	progress := make(chan struct{})
	tester := newTester()
	defer tester.terminate()
	tester.downloader.syncInitHook = func(origin, latest uint64) {
		starting <- struct{}{}
		<-progress
@@ -1664,10 +1671,16 @@ func TestDeliverHeadersHang64Light(t *testing.T) { testDeliverHeadersHang(t, 64,
func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
	t.Parallel()
	hashes, headers, blocks, receipts := makeChain(5, 0, genesis, nil, false)
	master := newTester()
	defer master.terminate()
	hashes, headers, blocks, receipts := master.makeChain(5, 0, master.genesis, nil, false)
	fakeHeads := []*types.Header{{}, {}, {}, {}}
	for i := 0; i < 200; i++ {
		tester := newTester()
		tester.peerDb = master.peerDb
		tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
		// Whenever the downloader requests headers, flood it with
		// a lot of unrequested header deliveries.
@@ -1703,44 +1716,81 @@ func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) {
// Tests that if fast sync aborts in the critical section, it can restart a few
// times before giving up.
func TestFastCriticalRestarts63(t *testing.T) { testFastCriticalRestarts(t, 63) }
func TestFastCriticalRestarts64(t *testing.T) { testFastCriticalRestarts(t, 64) }
func TestFastCriticalRestartsFail63(t *testing.T) { testFastCriticalRestarts(t, 63, false) }
func TestFastCriticalRestartsFail64(t *testing.T) { testFastCriticalRestarts(t, 64, false) }
func TestFastCriticalRestartsCont63(t *testing.T) { testFastCriticalRestarts(t, 63, true) }
func TestFastCriticalRestartsCont64(t *testing.T) { testFastCriticalRestarts(t, 64, true) }
func testFastCriticalRestarts(t *testing.T, protocol int) {
	t.Parallel()
func testFastCriticalRestarts(t *testing.T, protocol int, progress bool) {
	tester := newTester()
	defer tester.terminate()
	// Create a large enough blockchain to actually fast sync on
	targetBlocks := fsMinFullBlocks + 2*fsPivotInterval - 15
	hashes, headers, blocks, receipts := makeChain(targetBlocks, 0, genesis, nil, false)
	// Create a tester peer with the critical section state roots missing (force failures)
	tester := newTester()
	defer tester.terminate()
	hashes, headers, blocks, receipts := tester.makeChain(targetBlocks, 0, tester.genesis, nil, false)
	// Create a tester peer with a critical section header missing (force failures)
	tester.newPeer("peer", protocol, hashes, headers, blocks, receipts)
	delete(tester.peerHeaders["peer"], hashes[fsMinFullBlocks-1])
	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
	// Remove all possible pivot state roots and slow down replies (test failure resets later)
	for i := 0; i < fsPivotInterval; i++ {
		tester.peerMissingStates["peer"][headers[hashes[fsMinFullBlocks+i]].Root] = true
	}
	tester.downloader.dropPeer = func(id string) {} // We reuse the same "faulty" peer throughout the test
	tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 500*time.Millisecond) // Enough to reach the critical section
	// Synchronise with the peer a few times and make sure they fail until the retry limit
	for i := 0; i < fsCriticalTrials; i++ {
	for i := 0; i < int(fsCriticalTrials)-1; i++ {
		// Attempt a sync and ensure it fails properly
		if err := tester.sync("peer", nil, FastSync); err == nil {
			t.Fatalf("failing fast sync succeeded: %v", err)
		}
		time.Sleep(100 * time.Millisecond) // Make sure no in-flight requests remain
		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
		// If it's the first failure, pivot should be locked => reenable all others to detect pivot changes
		if i == 0 {
			if tester.downloader.fsPivotLock == nil {
				time.Sleep(400 * time.Millisecond) // Make sure the first huge timeout expires too
				t.Fatalf("pivot block not locked in after critical section failure")
			}
			tester.lock.Lock()
			tester.peerHeaders["peer"][hashes[fsMinFullBlocks-1]] = headers[hashes[fsMinFullBlocks-1]]
			tester.peerMissingStates["peer"] = map[common.Hash]bool{tester.downloader.fsPivotLock.Root: true}
			tester.downloader.peers.peers["peer"].getNodeData = tester.peerGetNodeDataFn("peer", 0)
			tester.lock.Unlock()
		}
	}
	// Return all nodes if we're testing fast sync progression
	if progress {
		tester.lock.Lock()
		tester.peerMissingStates["peer"] = map[common.Hash]bool{}
		tester.lock.Unlock()
		if err := tester.sync("peer", nil, FastSync); err != nil {
			t.Fatalf("failed to synchronise blocks in progressed fast sync: %v", err)
		}
		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != 1 {
			t.Fatalf("progressed pivot trial count mismatch: have %v, want %v", fails, 1)
		}
		assertOwnChain(t, tester, targetBlocks+1)
	} else {
		if err := tester.sync("peer", nil, FastSync); err == nil {
			t.Fatalf("succeeded to synchronise blocks in failed fast sync")
		}
		time.Sleep(150 * time.Millisecond) // Make sure no in-flight requests remain
		if fails := atomic.LoadUint32(&tester.downloader.fsPivotFails); fails != fsCriticalTrials {
			t.Fatalf("failed pivot trial count mismatch: have %v, want %v", fails, fsCriticalTrials)
		}
	}
	// Retry limit exhausted, downloader will switch to full sync, should succeed
	if err := tester.sync("peer", nil, FastSync); err != nil {
		t.Fatalf("failed to synchronise blocks in slow sync: %v", err)
	}
	assertOwnChain(t, tester, targetBlocks+1)
	// Note, we can't assert the chain here because the test asserter assumes sync
	// completed using a single mode of operation, whereas fast-then-slow can result
	// in arbitrary intermediate state that's not cleanly verifiable.
}
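The assertOwnChain helper used above is defined elsewhere in this file; as a rough, hypothetical sketch of the kind of invariant it enforces against the tester's own chain (lengths only, not the real implementation, and ignoring the light/fast-sync special cases):

// Hypothetical sketch, not the real assertOwnChain: after a successful sync the tester
// should hold `length` hashes and headers (targetBlocks plus the genesis block).
func checkOwnChainLength(t *testing.T, tester *downloadTester, length int) {
	tester.lock.RLock()
	defer tester.lock.RUnlock()
	if have := len(tester.ownHashes); have != length {
		t.Fatalf("synchronised hashes mismatch: have %v, want %v", have, length)
	}
	if have := len(tester.ownHeaders); have != length {
		t.Fatalf("synchronised headers mismatch: have %v, want %v", have, length)
	}
}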