@@ -21,14 +21,15 @@ import (
 	"testing"
 
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 )
 
 // makeTestTrie create a sample test trie to test node-wise reconstruction.
-func makeTestTrie() (*Database, *Trie, map[string][]byte) {
+func makeTestTrie() (*Database, *SecureTrie, map[string][]byte) {
 	// Create an empty trie
 	triedb := NewDatabase(memorydb.New())
-	trie, _ := New(common.Hash{}, triedb)
+	trie, _ := NewSecure(common.Hash{}, triedb)
 
 	// Fill it with some arbitrary data
 	content := make(map[string][]byte)
@@ -59,7 +60,7 @@ func makeTestTrie() (*Database, *Trie, map[string][]byte) {
 // content map.
 func checkTrieContents(t *testing.T, db *Database, root []byte, content map[string][]byte) {
 	// Check root availability and trie contents
-	trie, err := New(common.BytesToHash(root), db)
+	trie, err := NewSecure(common.BytesToHash(root), db)
 	if err != nil {
 		t.Fatalf("failed to create trie at %x: %v", root, err)
 	}
@@ -76,7 +77,7 @@ func checkTrieContents(t *testing.T, db *Database, root []byte, content map[stri
 // checkTrieConsistency checks that all nodes in a trie are indeed present.
 func checkTrieConsistency(db *Database, root common.Hash) error {
 	// Create and iterate a trie rooted in a subnode
-	trie, err := New(root, db)
+	trie, err := NewSecure(root, db)
 	if err != nil {
 		return nil // Consider a non existent state consistent
 	}
@@ -94,18 +95,21 @@ func TestEmptySync(t *testing.T) {
 	emptyB, _ := New(emptyRoot, dbB)
 
 	for i, trie := range []*Trie{emptyA, emptyB} {
-		if req := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New())).Missing(1); len(req) != 0 {
-			t.Errorf("test %d: content requested for empty trie: %v", i, req)
+		sync := NewSync(trie.Hash(), memorydb.New(), nil, NewSyncBloom(1, memorydb.New()))
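+		// Missing now reports trie nodes, node paths and contract codes separately; an empty trie should schedule none of them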
+		if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
+			t.Errorf("test %d: content requested for empty trie: %v, %v, %v", i, nodes, paths, codes)
 		}
 	}
 }
 
 // Tests that given a root hash, a trie can sync iteratively on a single thread,
 // requesting retrieval tasks and returning all of them in one go.
-func TestIterativeSyncIndividual(t *testing.T) { testIterativeSync(t, 1) }
-func TestIterativeSyncBatched(t *testing.T)    { testIterativeSync(t, 100) }
+func TestIterativeSyncIndividual(t *testing.T)       { testIterativeSync(t, 1, false) }
+func TestIterativeSyncBatched(t *testing.T)          { testIterativeSync(t, 100, false) }
+func TestIterativeSyncIndividualByPath(t *testing.T) { testIterativeSync(t, 1, true) }
+func TestIterativeSyncBatchedByPath(t *testing.T)    { testIterativeSync(t, 100, true) }
 
-func testIterativeSync(t *testing.T, count int) {
+func testIterativeSync(t *testing.T, count int, bypath bool) {
 	// Create a random trie to copy
 	srcDb, srcTrie, srcData := makeTestTrie()
 
@@ -114,16 +118,33 @@ func testIterativeSync(t *testing.T, count int) {
 	triedb := NewDatabase(diskdb)
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
-	queue := append([]common.Hash{}, sched.Missing(count)...)
-	for len(queue) > 0 {
-		results := make([]SyncResult, len(queue))
-		for i, hash := range queue {
+	nodes, paths, codes := sched.Missing(count)
+	var (
+		hashQueue []common.Hash
+		pathQueue []SyncPath
+	)
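+	// Split the work between hash-addressed and path-addressed retrieval; contract codes can only be requested by hash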
+	if !bypath {
+		hashQueue = append(append(hashQueue[:0], nodes...), codes...)
+	} else {
+		hashQueue = append(hashQueue[:0], codes...)
+		pathQueue = append(pathQueue[:0], paths...)
+	}
+	for len(hashQueue)+len(pathQueue) > 0 {
+		results := make([]SyncResult, len(hashQueue)+len(pathQueue))
+		for i, hash := range hashQueue {
 			data, err := srcDb.Node(hash)
 			if err != nil {
-				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+				t.Fatalf("failed to retrieve node data for hash %x: %v", hash, err)
 			}
 			results[i] = SyncResult{hash, data}
 		}
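+		// Resolve the path-addressed requests against the source trie and key the results by hash for processing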
+		for i, path := range pathQueue {
+			data, _, err := srcTrie.TryGetNode(path[0])
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for path %x: %v", path, err)
+			}
+			results[len(hashQueue)+i] = SyncResult{crypto.Keccak256Hash(data), data}
+		}
 		for _, result := range results {
 			if err := sched.Process(result); err != nil {
 				t.Fatalf("failed to process result %v", err)
@@ -134,7 +155,14 @@ func testIterativeSync(t *testing.T, count int) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		queue = append(queue[:0], sched.Missing(count)...)
+
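+		// Queue up the next batch, again split by hash or by path depending on the sync mode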
+		nodes, paths, codes = sched.Missing(count)
+		if !bypath {
+			hashQueue = append(append(hashQueue[:0], nodes...), codes...)
+		} else {
+			hashQueue = append(hashQueue[:0], codes...)
+			pathQueue = append(pathQueue[:0], paths...)
+		}
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -151,7 +179,9 @@ func TestIterativeDelayedSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
-	queue := append([]common.Hash{}, sched.Missing(10000)...)
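+	// Missing returns trie nodes and contract codes separately; merge both into a single hash-addressed retrieval queue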
+	nodes, _, codes := sched.Missing(10000)
+	queue := append(append([]common.Hash{}, nodes...), codes...)
+
 	for len(queue) > 0 {
 		// Sync only half of the scheduled nodes
 		results := make([]SyncResult, len(queue)/2+1)
@@ -172,7 +202,9 @@ func TestIterativeDelayedSync(t *testing.T) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		queue = append(queue[len(results):], sched.Missing(10000)...)
+
+		nodes, _, codes = sched.Missing(10000)
+		queue = append(append(queue[len(results):], nodes...), codes...)
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -194,7 +226,8 @@ func testIterativeRandomSync(t *testing.T, count int) {
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
 	queue := make(map[common.Hash]struct{})
-	for _, hash := range sched.Missing(count) {
+	nodes, _, codes := sched.Missing(count)
+	for _, hash := range append(nodes, codes...) {
 		queue[hash] = struct{}{}
 	}
 	for len(queue) > 0 {
@@ -218,8 +251,10 @@ func testIterativeRandomSync(t *testing.T, count int) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
+
 		queue = make(map[common.Hash]struct{})
-		for _, hash := range sched.Missing(count) {
+		nodes, _, codes = sched.Missing(count)
+		for _, hash := range append(nodes, codes...) {
 			queue[hash] = struct{}{}
 		}
 	}
@@ -239,7 +274,8 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
 	queue := make(map[common.Hash]struct{})
-	for _, hash := range sched.Missing(10000) {
+	nodes, _, codes := sched.Missing(10000)
+	for _, hash := range append(nodes, codes...) {
 		queue[hash] = struct{}{}
 	}
 	for len(queue) > 0 {
@@ -270,7 +306,8 @@ func TestIterativeRandomDelayedSync(t *testing.T) {
 		for _, result := range results {
 			delete(queue, result.Hash)
 		}
-		for _, hash := range sched.Missing(10000) {
+		nodes, _, codes = sched.Missing(10000)
+		for _, hash := range append(nodes, codes...) {
 			queue[hash] = struct{}{}
 		}
 	}
@@ -289,7 +326,8 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 	triedb := NewDatabase(diskdb)
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
-	queue := append([]common.Hash{}, sched.Missing(0)...)
+	nodes, _, codes := sched.Missing(0)
+	queue := append(append([]common.Hash{}, nodes...), codes...)
 	requested := make(map[common.Hash]struct{})
 
 	for len(queue) > 0 {
@@ -316,7 +354,9 @@ func TestDuplicateAvoidanceSync(t *testing.T) {
 			t.Fatalf("failed to commit data: %v", err)
 		}
 		batch.Write()
-		queue = append(queue[:0], sched.Missing(0)...)
+
+		nodes, _, codes = sched.Missing(0)
+		queue = append(append(queue[:0], nodes...), codes...)
 	}
 	// Cross check that the two tries are in sync
 	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
@@ -334,7 +374,10 @@ func TestIncompleteSync(t *testing.T) {
 	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
 
 	var added []common.Hash
-	queue := append([]common.Hash{}, sched.Missing(1)...)
+
+	nodes, _, codes := sched.Missing(1)
+	queue := append(append([]common.Hash{}, nodes...), codes...)
+
 	for len(queue) > 0 {
 		// Fetch a batch of trie nodes
 		results := make([]SyncResult, len(queue))
@@ -366,7 +409,8 @@ func TestIncompleteSync(t *testing.T) {
 			}
 		}
 		// Fetch the next batch to retrieve
-		queue = append(queue[:0], sched.Missing(1)...)
+		nodes, _, codes = sched.Missing(1)
+		queue = append(append(queue[:0], nodes...), codes...)
 	}
 	// Sanity check that removing any node from the database is detected
 	for _, node := range added[1:] {
@@ -380,3 +424,58 @@ func TestIncompleteSync(t *testing.T) {
 		diskdb.Put(key, value)
 	}
 }
+
+// Tests that trie nodes get scheduled lexicographically when having the same
+// depth.
+func TestSyncOrdering(t *testing.T) {
+	// Create a random trie to copy
+	srcDb, srcTrie, srcData := makeTestTrie()
+
+	// Create a destination trie and sync with the scheduler, tracking the requests
+	diskdb := memorydb.New()
+	triedb := NewDatabase(diskdb)
+	sched := NewSync(srcTrie.Hash(), diskdb, nil, NewSyncBloom(1, diskdb))
+
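+	// Record the paths handed out by the scheduler so their ordering can be verified afterwards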
+	nodes, paths, _ := sched.Missing(1)
+	queue := append([]common.Hash{}, nodes...)
+	reqs := append([]SyncPath{}, paths...)
+
+	for len(queue) > 0 {
+		results := make([]SyncResult, len(queue))
+		for i, hash := range queue {
+			data, err := srcDb.Node(hash)
+			if err != nil {
+				t.Fatalf("failed to retrieve node data for %x: %v", hash, err)
+			}
+			results[i] = SyncResult{hash, data}
+		}
+		for _, result := range results {
+			if err := sched.Process(result); err != nil {
+				t.Fatalf("failed to process result %v", err)
+			}
+		}
+		batch := diskdb.NewBatch()
+		if err := sched.Commit(batch); err != nil {
+			t.Fatalf("failed to commit data: %v", err)
+		}
+		batch.Write()
+
+		nodes, paths, _ = sched.Missing(1)
+		queue = append(queue[:0], nodes...)
+		reqs = append(reqs, paths...)
+	}
+	// Cross check that the two tries are in sync
+	checkTrieContents(t, triedb, srcTrie.Hash().Bytes(), srcData)
+
+	// Check that the trie nodes have been requested path-ordered
+	for i := 0; i < len(reqs)-1; i++ {
+		if len(reqs[i]) > 1 || len(reqs[i+1]) > 1 {
+			// In the case of the trie tests, there's no storage so the tuples
+			// must always be single items. 2-tuples should be tested in state.
+			t.Errorf("Invalid request tuples: len(%v) or len(%v) > 1", reqs[i], reqs[i+1])
+		}
+		if bytes.Compare(compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0])) > 0 {
+			t.Errorf("Invalid request order: %v before %v", compactToHex(reqs[i][0]), compactToHex(reqs[i+1][0]))
+		}
+	}
+}