From 410089afea1907d4abbf1dc4d1b42bda1094a5e5 Mon Sep 17 00:00:00 2001
From: Martin Holst Swende
Date: Wed, 17 Mar 2021 09:36:34 +0100
Subject: [PATCH] eth/protocols/snap, eth/downloader: don't use bloom filter in snap sync

---
 eth/downloader/downloader.go    |  2 +-
 eth/handler.go                  |  6 +++++-
 eth/protocols/snap/sync.go      | 13 ++++---------
 eth/protocols/snap/sync_test.go |  2 +-
 4 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 5ddd2f984..a5ed3761b 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -240,7 +240,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom,
 		headerProcCh:   make(chan []*types.Header, 1),
 		quitCh:         make(chan struct{}),
 		stateCh:        make(chan dataPack),
-		SnapSyncer:     snap.NewSyncer(stateDb, stateBloom),
+		SnapSyncer:     snap.NewSyncer(stateDb),
 		stateSyncStart: make(chan *stateSync),
 		syncStatsState: stateSyncStats{
 			processed: rawdb.ReadFastTrieProgress(stateDb),
diff --git a/eth/handler.go b/eth/handler.go
index 13fa70193..11c8565de 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -177,7 +177,11 @@ func newHandler(config *handlerConfig) (*handler, error) {
 	// Construct the downloader (long sync) and its backing state bloom if fast
 	// sync is requested. The downloader is responsible for deallocating the state
 	// bloom when it's done.
-	if atomic.LoadUint32(&h.fastSync) == 1 {
+	// Note: we don't enable it if snap-sync is performed, since it's very heavy
+	// and the heal-portion of the snap sync is much lighter than fast. What we particularly
+	// want to avoid, is a 90%-finished (but restarted) snap-sync to begin
+	// indexing the entire trie
+	if atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {
 		h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)
 	}
 	h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 1cfdef15b..0303e65ed 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -376,8 +376,7 @@ type SyncPeer interface {
 // - The peer delivers a stale response after a previous timeout
 // - The peer delivers a refusal to serve the requested state
 type Syncer struct {
-	db    ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
-	bloom *trie.SyncBloom     // Bloom filter to deduplicate nodes for state fixup
+	db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)
 
 	root    common.Hash    // Current state trie root being synced
 	tasks   []*accountTask // Current account task set being synced
@@ -446,10 +445,9 @@ type Syncer struct {
 
 // NewSyncer creates a new snapshot syncer to download the Ethereum state over the
 // snap protocol.
-func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer {
+func NewSyncer(db ethdb.KeyValueStore) *Syncer {
 	return &Syncer{
-		db:    db,
-		bloom: bloom,
+		db: db,
 
 		peers:    make(map[string]SyncPeer),
 		peerJoin: new(event.Feed),
@@ -546,7 +544,7 @@ func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {
 	s.lock.Lock()
 	s.root = root
 	s.healer = &healTask{
-		scheduler: state.NewStateSync(root, s.db, s.bloom),
+		scheduler: state.NewStateSync(root, s.db, nil),
 		trieTasks: make(map[common.Hash]trie.SyncPath),
 		codeTasks: make(map[common.Hash]struct{}),
 	}
@@ -1660,7 +1658,6 @@ func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {
 		bytes += common.StorageSize(len(code))
 		rawdb.WriteCode(batch, hash, code)
-		s.bloom.Add(hash[:])
 	}
 	if err := batch.Write(); err != nil {
 		log.Crit("Failed to persist bytecodes", "err", err)
 	}
@@ -1796,7 +1793,6 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 			}
 			// Node is not a boundary, persist to disk
 			batch.Put(it.Key(), it.Value())
-			s.bloom.Add(it.Key())
 
 			bytes += common.StorageSize(common.HashLength + len(it.Value()))
 			nodes++
@@ -1953,7 +1949,6 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 		}
 		// Node is neither a boundary, not an incomplete account, persist to disk
 		batch.Put(it.Key(), it.Value())
-		s.bloom.Add(it.Key())
 
 		bytes += common.StorageSize(common.HashLength + len(it.Value()))
 		nodes++
diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go
index 0b048786e..49dff7bb3 100644
--- a/eth/protocols/snap/sync_test.go
+++ b/eth/protocols/snap/sync_test.go
@@ -525,7 +525,7 @@ func TestSyncBloatedProof(t *testing.T) {
 
 func setupSyncer(peers ...*testPeer) *Syncer {
 	stateDb := rawdb.NewMemoryDatabase()
-	syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb))
+	syncer := NewSyncer(stateDb)
 	for _, peer := range peers {
 		syncer.Register(peer)
 		peer.remote = syncer
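Illustrative sketch, not part of the patch itself: with the bloom parameter dropped, wiring up a snap syncer only needs a key-value store, mirroring the updated setupSyncer test helper above. The snippet assumes the go-ethereum package layout as of this commit; stateDb is just a stand-in in-memory database.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
)

func main() {
	// Node deduplication now relies on the backing database alone; no
	// trie.SyncBloom is constructed or handed to the syncer anymore.
	stateDb := rawdb.NewMemoryDatabase()
	syncer := snap.NewSyncer(stateDb)
	fmt.Printf("snap syncer ready: %T\n", syncer)
}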
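A second illustrative sketch, also not part of the patch: the eth/handler.go hunk only builds the state bloom when fast sync is requested and snap sync is off, since the heal phase of snap sync is comparatively light and a restarted, mostly-finished snap sync should not start indexing the entire trie. The helper below restates that condition against the go-ethereum APIs of this vintage; maybeNewStateBloom is a made-up name, fastSync/snapSync stand in for the handler's atomic mode flags, and bloomCache/db stand in for config.BloomCache and config.Database.

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// maybeNewStateBloom returns a sync bloom only for a pure fast sync; for a
// snap sync (or anything else) it returns nil, matching the patched handler.
func maybeNewStateBloom(fastSync, snapSync *uint32, bloomCache uint64, db ethdb.Database) *trie.SyncBloom {
	if atomic.LoadUint32(fastSync) == 1 && atomic.LoadUint32(snapSync) == 0 {
		return trie.NewSyncBloom(bloomCache, db)
	}
	return nil
}

func main() {
	var fastSync, snapSync uint32 = 1, 1 // snap sync enabled on top of fast sync
	if maybeNewStateBloom(&fastSync, &snapSync, 256, rawdb.NewMemoryDatabase()) == nil {
		fmt.Println("snap sync: state bloom skipped")
	}
}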