@@ -17,8 +17,10 @@
 package light
 
 import (
+	"context"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"math/big"
 	"time"
@@ -47,35 +49,35 @@ const (
 	HelperTrieProcessConfirmations = 256 // number of confirmations before a HelperTrie is generated
 )
 
-// trustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
+// TrustedCheckpoint represents a set of post-processed trie roots (CHT and BloomTrie) associated with
 // the appropriate section index and head hash. It is used to start light syncing from this checkpoint
 // and avoid downloading the entire header chain while still being able to securely access old headers/logs.
-type trustedCheckpoint struct {
+type TrustedCheckpoint struct {
 	name                                string
-	sectionIdx                          uint64
-	sectionHead, chtRoot, bloomTrieRoot common.Hash
+	SectionIdx                          uint64
+	SectionHead, CHTRoot, BloomRoot     common.Hash
 }
 
 var (
-	mainnetCheckpoint = trustedCheckpoint{
+	mainnetCheckpoint = TrustedCheckpoint{
 		name:          "mainnet",
-		sectionIdx:    179,
-		sectionHead:   common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"),
-		chtRoot:       common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"),
-		bloomTrieRoot: common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"),
+		SectionIdx:    179,
+		SectionHead:   common.HexToHash("ae778e455492db1183e566fa0c67f954d256fdd08618f6d5a393b0e24576d0ea"),
+		CHTRoot:       common.HexToHash("646b338f9ca74d936225338916be53710ec84020b89946004a8605f04c817f16"),
+		BloomRoot:     common.HexToHash("d0f978f5dbc86e5bf931d8dd5b2ecbebbda6dc78f8896af6a27b46a3ced0ac25"),
 	}
 
-	ropstenCheckpoint = trustedCheckpoint{
+	ropstenCheckpoint = TrustedCheckpoint{
 		name:          "ropsten",
-		sectionIdx:    107,
-		sectionHead:   common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"),
-		chtRoot:       common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"),
-		bloomTrieRoot: common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"),
+		SectionIdx:    107,
+		SectionHead:   common.HexToHash("e1988f95399debf45b873e065e5cd61b416ef2e2e5deec5a6f87c3127086e1ce"),
+		CHTRoot:       common.HexToHash("15cba18e4de0ab1e95e202625199ba30147aec8b0b70384b66ebea31ba6a18e0"),
+		BloomRoot:     common.HexToHash("e00fa6389b2e597d9df52172cd8e936879eed0fca4fa59db99e2c8ed682562f2"),
 	}
 )
 
 // trustedCheckpoints associates each known checkpoint with the genesis hash of the chain it belongs to
-var trustedCheckpoints = map[common.Hash]trustedCheckpoint{
+var trustedCheckpoints = map[common.Hash]TrustedCheckpoint{
 	params.MainnetGenesisHash: mainnetCheckpoint,
 	params.TestnetGenesisHash: ropstenCheckpoint,
 }
@@ -119,7 +121,8 @@ func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common
 
 // ChtIndexerBackend implements core.ChainIndexerBackend
 type ChtIndexerBackend struct {
-	diskdb               ethdb.Database
+	diskdb, trieTable    ethdb.Database
+	odr                  OdrBackend
 	triedb               *trie.Database
 	section, sectionSize uint64
 	lastHash             common.Hash
@@ -127,7 +130,7 @@ type ChtIndexerBackend struct {
 }
 
 // NewBloomTrieIndexer creates a BloomTrie chain indexer
-func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
+func NewChtIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
 	var sectionSize, confirmReq uint64
 	if clientMode {
 		sectionSize = CHTFrequencyClient
@@ -137,28 +140,64 @@ func NewChtIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
 		confirmReq = HelperTrieProcessConfirmations
 	}
 	idb := ethdb.NewTable(db, "chtIndex-")
+	trieTable := ethdb.NewTable(db, ChtTablePrefix)
 	backend := &ChtIndexerBackend{
 		diskdb:      db,
-		triedb:      trie.NewDatabase(ethdb.NewTable(db, ChtTablePrefix)),
+		odr:         odr,
+		trieTable:   trieTable,
+		triedb:      trie.NewDatabase(trieTable),
 		sectionSize: sectionSize,
 	}
 	return core.NewChainIndexer(db, idb, backend, sectionSize, confirmReq, time.Millisecond*100, "cht")
 }
 
+// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
+// ODR backend in order to be able to add new entries and calculate subsequent root hashes
+func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
+	batch := c.trieTable.NewBatch()
+	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1}
+	for {
+		err := c.odr.Retrieve(ctx, r)
+		switch err {
+		case nil:
+			r.Proof.Store(batch)
+			return batch.Write()
+		case ErrNoPeers:
+			// if there are no peers to serve, retry later
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-time.After(time.Second * 10):
+				// stay in the loop and try again
+			}
+		default:
+			return err
+		}
+	}
+}
+
 // Reset implements core.ChainIndexerBackend
-func (c *ChtIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error {
+func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
 	var root common.Hash
 	if section > 0 {
 		root = GetChtRoot(c.diskdb, section-1, lastSectionHead)
 	}
 	var err error
 	c.trie, err = trie.New(root, c.triedb)
+	if err != nil && c.odr != nil {
+		err = c.fetchMissingNodes(ctx, section, root)
+		if err == nil {
+			c.trie, err = trie.New(root, c.triedb)
+		}
+	}
 	c.section = section
 	return err
 }
 
 // Process implements core.ChainIndexerBackend
-func (c *ChtIndexerBackend) Process(header *types.Header) {
+func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error {
 	hash, num := header.Hash(), header.Number.Uint64()
 	c.lastHash = hash
@@ -170,6 +209,7 @@ func (c *ChtIndexerBackend) Process(header *types.Header) {
 	binary.BigEndian.PutUint64(encNumber[:], num)
 	data, _ := rlp.EncodeToBytes(ChtNode{hash, td})
 	c.trie.Update(encNumber[:], data)
+	return nil
 }
 
 // Commit implements core.ChainIndexerBackend
@@ -181,7 +221,7 @@ func (c *ChtIndexerBackend) Commit() error {
 	c.triedb.Commit(root, false)
 
 	if ((c.section+1)*c.sectionSize)%CHTFrequencyClient == 0 {
-		log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", c.lastHash, "root", root)
+		log.Info("Storing CHT", "section", c.section*c.sectionSize/CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
 	}
 	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
 	return nil
@@ -190,7 +230,6 @@ func (c *ChtIndexerBackend) Commit() error {
 
 const (
 	BloomTrieFrequency        = 32768
 	ethBloomBitsSection       = 4096
-	ethBloomBitsConfirmations = 256
 )
 
 var (
@@ -215,7 +254,8 @@ func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root
 
 // BloomTrieIndexerBackend implements core.ChainIndexerBackend
 type BloomTrieIndexerBackend struct {
-	diskdb                                     ethdb.Database
+	diskdb, trieTable                          ethdb.Database
+	odr                                        OdrBackend
 	triedb                                     *trie.Database
 	section, parentSectionSize, bloomTrieRatio uint64
 	trie                                       *trie.Trie
@@ -223,44 +263,98 @@ type BloomTrieIndexerBackend struct {
 }
 
 // NewBloomTrieIndexer creates a BloomTrie chain indexer
-func NewBloomTrieIndexer(db ethdb.Database, clientMode bool) *core.ChainIndexer {
+func NewBloomTrieIndexer(db ethdb.Database, clientMode bool, odr OdrBackend) *core.ChainIndexer {
+	trieTable := ethdb.NewTable(db, BloomTrieTablePrefix)
 	backend := &BloomTrieIndexerBackend{
 		diskdb:    db,
-		triedb:    trie.NewDatabase(ethdb.NewTable(db, BloomTrieTablePrefix)),
+		odr:       odr,
+		trieTable: trieTable,
+		triedb:    trie.NewDatabase(trieTable),
 	}
 	idb := ethdb.NewTable(db, "bltIndex-")
-	var confirmReq uint64
 	if clientMode {
 		backend.parentSectionSize = BloomTrieFrequency
-		confirmReq = HelperTrieConfirmations
 	} else {
 		backend.parentSectionSize = ethBloomBitsSection
-		confirmReq = HelperTrieProcessConfirmations
 	}
 	backend.bloomTrieRatio = BloomTrieFrequency / backend.parentSectionSize
 	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
-	return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, confirmReq-ethBloomBitsConfirmations, time.Millisecond*100, "bloomtrie")
+	return core.NewChainIndexer(db, idb, backend, BloomTrieFrequency, 0, time.Millisecond*100, "bloomtrie")
 }
 
+// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
+// ODR backend in order to be able to add new entries and calculate subsequent root hashes
+func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
+	indexCh := make(chan uint, types.BloomBitLength)
+	type res struct {
+		nodes *NodeSet
+		err   error
+	}
+	resCh := make(chan res, types.BloomBitLength)
+	for i := 0; i < 20; i++ {
+		go func() {
+			for bitIndex := range indexCh {
+				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIdxList: []uint64{section - 1}}
+				for {
+					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
+						// if there are no peers to serve, retry later
+						select {
+						case <-ctx.Done():
+							resCh <- res{nil, ctx.Err()}
+							return
+						case <-time.After(time.Second * 10):
+							// stay in the loop and try again
+						}
+					} else {
+						resCh <- res{r.Proofs, err}
+						break
+					}
+				}
+			}
+		}()
+	}
+
+	for i := uint(0); i < types.BloomBitLength; i++ {
+		indexCh <- i
+	}
+	close(indexCh)
+	batch := b.trieTable.NewBatch()
+	for i := uint(0); i < types.BloomBitLength; i++ {
+		res := <-resCh
+		if res.err != nil {
+			return res.err
+		}
+		res.nodes.Store(batch)
+	}
+	return batch.Write()
+}
+
 // Reset implements core.ChainIndexerBackend
-func (b *BloomTrieIndexerBackend) Reset(section uint64, lastSectionHead common.Hash) error {
+func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error {
 	var root common.Hash
 	if section > 0 {
 		root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead)
 	}
 	var err error
 	b.trie, err = trie.New(root, b.triedb)
+	if err != nil && b.odr != nil {
+		err = b.fetchMissingNodes(ctx, section, root)
+		if err == nil {
+			b.trie, err = trie.New(root, b.triedb)
+		}
+	}
 	b.section = section
 	return err
 }
 
 // Process implements core.ChainIndexerBackend
-func (b *BloomTrieIndexerBackend) Process(header *types.Header) {
+func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error {
 	num := header.Number.Uint64() - b.section*BloomTrieFrequency
 	if (num+1)%b.parentSectionSize == 0 {
 		b.sectionHeads[num/b.parentSectionSize] = header.Hash()
 	}
+	return nil
 }
 
 // Commit implements core.ChainIndexerBackend
@@ -300,7 +394,7 @@ func (b *BloomTrieIndexerBackend) Commit() error {
 	b.triedb.Commit(root, false)
 
 	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
-	log.Info("Storing bloom trie", "section", b.section, "head", sectionHead, "root", root, "compression", float64(compSize)/float64(decompSize))
+	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))
 	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
 	return nil
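
For context, a purely illustrative sketch (not part of this diff) of how the new constructor signatures are consumed; the package name, the startIndexers helper and the chainDb/odr variables are assumptions made up for the example, while light.NewChtIndexer, light.NewBloomTrieIndexer and light.OdrBackend are the identifiers actually touched above.

package example

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/light"
)

// startIndexers shows the only call-site change introduced by this diff: both
// helper-trie indexers now also take an OdrBackend, which they use in
// fetchMissingNodes to recover the previous section's trie nodes via ODR when
// those nodes are not present in the local database.
func startIndexers(chainDb ethdb.Database, odr light.OdrBackend) {
	chtIndexer := light.NewChtIndexer(chainDb, true, odr)             // clientMode = true
	bloomTrieIndexer := light.NewBloomTrieIndexer(chainDb, true, odr) // clientMode = true

	// The returned *core.ChainIndexer values are attached to the header chain
	// by the caller exactly as before; only the constructor arguments differ.
	_ = chtIndexer
	_ = bloomTrieIndexer
}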