eth/protocols/snap, eth/downloader: don't use bloom filter in snap sync

pull/22513/head
Martin Holst Swende 4 years ago
parent 91726e8aad
commit 410089afea
No known key found for this signature in database
GPG Key ID: 683B438C05A5DDF0
  1. 2
      eth/downloader/downloader.go
  2. 6
      eth/handler.go
  3. 13
      eth/protocols/snap/sync.go
  4. 2
      eth/protocols/snap/sync_test.go

@@ -240,7 +240,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
headerProcCh: make(chan []*types.Header, 1),
quitCh: make(chan struct{}),
stateCh: make(chan dataPack),
SnapSyncer: snap.NewSyncer(stateDb, stateBloom),
SnapSyncer: snap.NewSyncer(stateDb),
stateSyncStart: make(chan *stateSync),
syncStatsState: stateSyncStats{
processed: rawdb.ReadFastTrieProgress(stateDb),

@@ -177,7 +177,11 @@ func newHandler(config *handlerConfig) (*handler, error) {
// Construct the downloader (long sync) and its backing state bloom if fast
// sync is requested. The downloader is responsible for deallocating the state
// bloom when it's done.
if atomic.LoadUint32(&h.fastSync) == 1 {
// Note: we don't enable it if snap-sync is performed, since it's very heavy
// and the heal-portion of the snap sync is much lighter than fast. What we particularly
// want to avoid, is a 90%-finished (but restarted) snap-sync to begin
// indexing the entire trie
if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
}
h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)

@@ -376,8 +376,7 @@ type SyncPeer interface {
// - The peer delivers a stale response after a previous timeout
// - The peer delivers a refusal to serve the requested state
type Syncer struct {
db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
bloom *trie.SyncBloom // Bloom filter to deduplicate nodes for state fixup
db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
root common.Hash // Current state trie root being synced
tasks []*accountTask // Current account task set being synced
@@ -446,10 +445,9 @@ type Syncer struct {
// NewSyncer creates a new snapshot syncer to download the Ethereum state over the
// snap protocol.
func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer {
func NewSyncer(db ethdb.KeyValueStore) *Syncer {
return &Syncer{
db: db,
bloom: bloom,
db: db,
peers: make(map[string]SyncPeer),
peerJoin: new(event.Feed),
@@ -546,7 +544,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
s.lock.Lock()
s.root = root
s.healer = &healTask{
scheduler: state.NewStateSync(root, s.db, s.bloom),
scheduler: state.NewStateSync(root, s.db, nil),
trieTasks: make(map[common.Hash]trie.SyncPath),
codeTasks: make(map[common.Hash]struct{}),
}
@@ -1660,7 +1658,6 @@ func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
bytes += common.StorageSize(len(code))
rawdb.WriteCode(batch, hash, code)
s.bloom.Add(hash[:])
}
if err := batch.Write(); err != nil {
log.Crit("Failed to persist bytecodes", "err", err)
@@ -1796,7 +1793,6 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
}
// Node is not a boundary, persist to disk
batch.Put(it.Key(), it.Value())
s.bloom.Add(it.Key())
bytes += common.StorageSize(common.HashLength + len(it.Value()))
nodes++
@@ -1953,7 +1949,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
}
// Node is neither a boundary, not an incomplete account, persist to disk
batch.Put(it.Key(), it.Value())
s.bloom.Add(it.Key())
bytes += common.StorageSize(common.HashLength + len(it.Value()))
nodes++

@@ -525,7 +525,7 @@ func TestSyncBloatedProof(t *testing.T) {
func setupSyncer(peers ...*testPeer) *Syncer {
stateDb := rawdb.NewMemoryDatabase()
syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb))
syncer := NewSyncer(stateDb)
for _, peer := range peers {
syncer.Register(peer)
peer.remote = syncer

Loading…
Cancel
Save