|
|
|
@ -21,6 +21,7 @@ import ( |
|
|
|
|
"fmt" |
|
|
|
|
"io" |
|
|
|
|
"reflect" |
|
|
|
|
"runtime" |
|
|
|
|
"sync" |
|
|
|
|
"time" |
|
|
|
|
|
|
|
|
@ -278,16 +279,20 @@ func expandNode(hash hashNode, n node) node { |
|
|
|
|
// its written out to disk or garbage collected. No read cache is created, so all
|
|
|
|
|
// data retrievals will hit the underlying disk database.
|
|
|
|
|
func NewDatabase(diskdb ethdb.KeyValueStore) *Database { |
|
|
|
|
return NewDatabaseWithCache(diskdb, 0) |
|
|
|
|
return NewDatabaseWithCache(diskdb, 0, "") |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// NewDatabaseWithCache creates a new trie database to store ephemeral trie content
|
|
|
|
|
// before it's written out to disk or garbage collected. It also acts as a read cache
|
|
|
|
|
// for nodes loaded from disk.
|
|
|
|
|
func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database { |
|
|
|
|
func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string) *Database { |
|
|
|
|
var cleans *fastcache.Cache |
|
|
|
|
if cache > 0 { |
|
|
|
|
if journal == "" { |
|
|
|
|
cleans = fastcache.New(cache * 1024 * 1024) |
|
|
|
|
} else { |
|
|
|
|
cleans = fastcache.LoadFromFileOrNew(journal, cache*1024*1024) |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
return &Database{ |
|
|
|
|
diskdb: diskdb, |
|
|
|
@ -867,3 +872,43 @@ func (db *Database) Size() (common.StorageSize, common.StorageSize) { |
|
|
|
|
var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) |
|
|
|
|
return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// saveCache saves clean state cache to given directory path
|
|
|
|
|
// using specified CPU cores.
|
|
|
|
|
func (db *Database) saveCache(dir string, threads int) error { |
|
|
|
|
if db.cleans == nil { |
|
|
|
|
return nil |
|
|
|
|
} |
|
|
|
|
log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) |
|
|
|
|
|
|
|
|
|
start := time.Now() |
|
|
|
|
err := db.cleans.SaveToFileConcurrent(dir, threads) |
|
|
|
|
if err != nil { |
|
|
|
|
log.Error("Failed to persist clean trie cache", "error", err) |
|
|
|
|
return err |
|
|
|
|
} |
|
|
|
|
log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) |
|
|
|
|
return nil |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// SaveCache atomically saves fast cache data to the given dir using all
|
|
|
|
|
// available CPU cores.
|
|
|
|
|
func (db *Database) SaveCache(dir string) error { |
|
|
|
|
return db.saveCache(dir, runtime.GOMAXPROCS(0)) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// SaveCachePeriodically atomically saves fast cache data to the given dir with
|
|
|
|
|
// the specified interval. All dump operation will only use a single CPU core.
|
|
|
|
|
func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { |
|
|
|
|
ticker := time.NewTicker(interval) |
|
|
|
|
defer ticker.Stop() |
|
|
|
|
|
|
|
|
|
for { |
|
|
|
|
select { |
|
|
|
|
case <-ticker.C: |
|
|
|
|
db.saveCache(dir, 1) |
|
|
|
|
case <-stopCh: |
|
|
|
|
return |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|