core/rawdb: use AncientRange when initializing leveldb from freezer (#23612)

* core/rawdb: utilize AncientRange when initializing from freezer

* core/rawdb: remove debug sanity check
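
The change boils down to one pattern: read the frozen hashes in fixed-size chunks through AncientRange instead of issuing one Ancient call per item. Below is a minimal stand-alone sketch of that pattern; it is hypothetical and not part of this commit (the helper name readFrozenHashes and the callback fn are invented for illustration), while the freezerHashTable table name, the 32-byte hash size and the AncientRange call mirror the diff further down.

    package rawdb

    import (
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/ethdb"
    )

    // readFrozenHashes is a hypothetical helper (not part of this commit) that
    // walks all frozen canonical hashes by pulling up to `step` items per
    // AncientRange call, instead of one Ancient call per item, and invokes fn
    // for every (number, hash) pair it reads.
    func readFrozenHashes(db ethdb.Database, frozen, step uint64, fn func(number uint64, hash common.Hash)) error {
        for i := uint64(0); i < frozen; {
            count := step
            if i+count > frozen {
                count = frozen - i // the last chunk may be shorter
            }
            // Each hash is 32 bytes, so cap the returned payload at 32*count bytes.
            data, err := db.AncientRange(freezerHashTable, i, count, 32*count)
            if err != nil {
                return err
            }
            if len(data) == 0 {
                break // defensive: avoid spinning if nothing was returned
            }
            for j, h := range data {
                fn(i+uint64(j), common.BytesToHash(h))
            }
            // Advance by what was actually returned, which may be less than count.
            i += uint64(len(data))
        }
        return nil
    }

Called with step = 100_000, this performs the same chunked reads as the rewritten loop in InitDatabaseFromFreezer shown in the diff.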
Authored by Martin Holst Swende, committed by GitHub
parent 50e07a1e16
commit 347c37b362
      core/rawdb/chain_iterator.go

@@ -44,17 +44,20 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
         logged = start.Add(-7 * time.Second) // Unindex during import is fast, don't double log
         hash   common.Hash
     )
-    for i := uint64(0); i < frozen; i++ {
-        // Since the freezer has all data in sequential order on a file,
-        // it would be 'neat' to read more data in one go, and let the
-        // freezerdb return N items (e.g up to 1000 items per go)
-        // That would require an API change in Ancients though
-        if h, err := db.Ancient(freezerHashTable, i); err != nil {
+    for i := uint64(0); i < frozen; {
+        // We read 100K hashes at a time, for a total of 3.2M
+        count := uint64(100_000)
+        if i+count > frozen {
+            count = frozen - i
+        }
+        data, err := db.AncientRange(freezerHashTable, i, count, 32*count)
+        if err != nil {
             log.Crit("Failed to init database from freezer", "err", err)
-        } else {
-            hash = common.BytesToHash(h)
         }
-        WriteHeaderNumber(batch, hash, i)
+        for j, h := range data {
+            number := i + uint64(j)
+            hash = common.BytesToHash(h)
+            WriteHeaderNumber(batch, hash, number)
             // If enough data was accumulated in memory or we're at the last block, dump to disk
             if batch.ValueSize() > ethdb.IdealBatchSize {
                 if err := batch.Write(); err != nil {
@@ -62,6 +65,8 @@ func InitDatabaseFromFreezer(db ethdb.Database) {
                 }
                 batch.Reset()
             }
+        }
+        i += uint64(len(data))
         // If we've spent too much time already, notify the user of what we're doing
         if time.Since(logged) > 8*time.Second {
             log.Info("Initializing database from freezer", "total", frozen, "number", i, "hash", hash, "elapsed", common.PrettyDuration(time.Since(start)))
