@@ -272,33 +272,43 @@ func expandNode(hash hashNode, n node) node {
 	}
 }
 
+// Config defines all necessary options for database.
+type Config struct {
+	Cache     int    // Memory allowance (MB) to use for caching trie nodes in memory
+	Journal   string // Journal of clean cache to survive node restarts
+	Preimages bool   // Flag whether the preimage of trie key is recorded
+}
+
 // NewDatabase creates a new trie database to store ephemeral trie content before
 // it's written out to disk or garbage collected. No read cache is created, so all
 // data retrievals will hit the underlying disk database.
 func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
-	return NewDatabaseWithCache(diskdb, 0, "")
+	return NewDatabaseWithConfig(diskdb, nil)
 }
 
-// NewDatabaseWithCache creates a new trie database to store ephemeral trie content
+// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
 // before it's written out to disk or garbage collected. It also acts as a read cache
 // for nodes loaded from disk.
-func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string) *Database {
+func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database {
 	var cleans *fastcache.Cache
-	if cache > 0 {
-		if journal == "" {
-			cleans = fastcache.New(cache * 1024 * 1024)
+	if config != nil && config.Cache > 0 {
+		if config.Journal == "" {
+			cleans = fastcache.New(config.Cache * 1024 * 1024)
 		} else {
-			cleans = fastcache.LoadFromFileOrNew(journal, cache*1024*1024)
+			cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
 		}
 	}
-	return &Database{
+	db := &Database{
 		diskdb: diskdb,
 		cleans: cleans,
 		dirties: map[common.Hash]*cachedNode{{}: {
 			children: make(map[common.Hash]uint16),
 		}},
-		preimages: make(map[common.Hash][]byte),
 	}
+	if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future
+		db.preimages = make(map[common.Hash][]byte)
+	}
+	return db
 }
 
 // DiskDB retrieves the persistent storage backing the trie database.
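
For orientation, a minimal caller-side sketch of the new constructor (hypothetical usage, not part of this patch; memorydb stands in for a real disk database, and the journal path is made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	diskdb := memorydb.New() // in-memory ethdb.KeyValueStore, for the example only

	// Passing nil keeps today's defaults: no clean cache, preimages enabled.
	_ = trie.NewDatabaseWithConfig(diskdb, nil)

	// The old NewDatabaseWithCache(diskdb, 256, "clean.journal") call becomes:
	db := trie.NewDatabaseWithConfig(diskdb, &trie.Config{
		Cache:     256,             // MB of clean-node read cache (fastcache)
		Journal:   "clean.journal", // hypothetical path; "" starts a fresh cache
		Preimages: false,           // additionally drop hash->key preimage tracking
	})
	fmt.Println(db != nil)
}
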
@@ -345,6 +355,11 @@ func (db *Database) insert(hash common.Hash, size int, node node) {
 //
 // Note, this method assumes that the database's lock is held!
 func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
+	// Short circuit if preimage collection is disabled
+	if db.preimages == nil {
+		return
+	}
+	// Track the preimage if a yet unknown one
 	if _, ok := db.preimages[hash]; ok {
 		return
 	}
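
The disable mechanism is simply a nil map, so no extra flag field is needed. A self-contained sketch of the Go semantics this guard relies on (illustrative only, not from the patch):

package main

import "fmt"

func main() {
	var preimages map[string][]byte // nil map: collection disabled

	// Reads on a nil map are safe and yield zero values...
	v, ok := preimages["key"]
	fmt.Println(v == nil, ok) // true false

	// ...but writes panic, hence the early return added above.
	if preimages == nil {
		return
	}
	preimages["key"] = []byte("value") // never reached while preimages is nil
}
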
@@ -431,6 +446,10 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
 // preimage retrieves a cached trie node pre-image from memory. If it cannot be
 // found cached, the method queries the persistent database for the content.
 func (db *Database) preimage(hash common.Hash) []byte {
+	// Short circuit if preimage collection is disabled
+	if db.preimages == nil {
+		return nil
+	}
 	// Retrieve the node from cache if available
 	db.lock.RLock()
 	preimage := db.preimages[hash]
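
Only the guard is visible in this hunk; per the doc comment, the method then falls back to the persistent database. A sketch of the whole post-patch lookup, with the unshown tail reconstructed from that comment (the rawdb.ReadPreimage fallback is an assumption):

func (db *Database) preimage(hash common.Hash) []byte {
	// Short circuit if preimage collection is disabled
	if db.preimages == nil {
		return nil
	}
	// Retrieve the node from cache if available
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage
	}
	// Assumed fallback per the doc comment: query the persistent database
	return rawdb.ReadPreimage(db.diskdb, hash)
}
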
@@ -588,6 +607,9 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	// leave for later to deduplicate writes.
 	flushPreimages := db.preimagesSize > 4*1024*1024
 	if flushPreimages {
+		if db.preimages == nil {
+			log.Error("Attempted to write preimages whilst disabled")
+		} else {
 			rawdb.WritePreimages(batch, db.preimages)
 			if batch.ValueSize() > ethdb.IdealBatchSize {
 				if err := batch.Write(); err != nil {
@@ -596,6 +618,7 @@ func (db *Database) Cap(limit common.StorageSize) error {
 				batch.Reset()
 			}
 		}
+	}
 	// Keep committing nodes from the flush-list until we're below allowance
 	oldest := db.oldest
 	for size > limit && oldest != (common.Hash{}) {
@@ -630,8 +653,12 @@ func (db *Database) Cap(limit common.StorageSize) error {
 	defer db.lock.Unlock()
 
 	if flushPreimages {
+		if db.preimages == nil {
+			log.Error("Attempted to reset preimage cache whilst disabled")
+		} else {
 			db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
+		}
 	}
 	for db.oldest != oldest {
 		node := db.dirties[db.oldest]
 		delete(db.dirties, db.oldest)
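
Because the new if/else spans two hunks, the final shape of the flush path in Cap is easier to read assembled (content taken directly from the hunks above):

flushPreimages := db.preimagesSize > 4*1024*1024
if flushPreimages {
	if db.preimages == nil {
		log.Error("Attempted to write preimages whilst disabled")
	} else {
		rawdb.WritePreimages(batch, db.preimages)
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
}
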
@@ -674,6 +701,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
 	batch := db.diskdb.NewBatch()
 
 	// Move all of the accumulated preimages into a write batch
+	if db.preimages != nil {
 		rawdb.WritePreimages(batch, db.preimages)
 		if batch.ValueSize() > ethdb.IdealBatchSize {
 			if err := batch.Write(); err != nil {
@@ -687,7 +715,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
 				return err
 			}
 			batch.Reset()
 		}
+	}
 	// Move the trie itself into the batch, flushing if enough data is accumulated
 	nodes, storage := len(db.dirties), db.dirtiesSize
@@ -709,8 +737,9 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
 	batch.Reset()
 
 	// Reset the storage counters and bumped metrics
+	if db.preimages != nil {
 		db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
+	}
 	memcacheCommitTimeTimer.Update(time.Since(start))
 	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
 	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))
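
A hypothetical in-package test sketch of the new behavior (memorydb.New and crypto.Keccak256Hash are standard geth helpers; the test itself is not part of this change):

package trie

import (
	"testing"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func TestPreimagesDisabled(t *testing.T) {
	db := NewDatabaseWithConfig(memorydb.New(), &Config{Preimages: false})

	key := []byte("some trie key")
	db.insertPreimage(crypto.Keccak256Hash(key), key) // no-op: collection disabled

	if got := db.preimage(crypto.Keccak256Hash(key)); got != nil {
		t.Fatalf("preimage returned %x, want nil when disabled", got)
	}
}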