core/state/snapshot: bloom, metrics and prefetcher fixes

Branch: pull/20152/head
Author: Péter Szilágyi
Commit: 22c494d399 (parent 3ad4335acc)
Changed files:
  core/state/snapshot/difflayer.go   (36 changes)
  core/state/snapshot/disklayer.go   (14 changes)
  core/state/snapshot/snapshot.go    (7 changes)
  core/state_prefetcher.go           (11 changes)

core/state/snapshot/difflayer.go
@@ -43,9 +43,11 @@ var (
 	// aggregatorItemLimit is an approximate number of items that will end up
 	// in the aggregator layer before it's flushed out to disk. A plain account
-	// weighs around 14B (+hash), a storage slot 32B (+hash), so 50 is a very
-	// rough average of what we might see.
-	aggregatorItemLimit = aggregatorMemoryLimit / 55
+	// weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
+	// 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
+	// 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
+	// smaller number to be on the safe side.
+	aggregatorItemLimit = aggregatorMemoryLimit / 42

 	// bloomTargetError is the target false positive rate when the aggregator
 	// layer is at its fullest. The actual value will probably move around up
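The new comment packs a bit of arithmetic: an average payload of roughly (14B + 16B) / 2 ≈ 15B plus the 32B hash key gives about 47B per entry, and the divisor is then rounded down to 42 for headroom. Below is a standalone sketch of that estimate; the 4 MB aggregatorMemoryLimit is an assumed illustrative value, not taken from this diff, and only the per-entry weights come from the comment above.

```go
// Back-of-the-envelope check of the 47B estimate and the /42 divisor.
package main

import "fmt"

func main() {
	const (
		aggregatorMemoryLimit = 4 * 1024 * 1024 // assumed memory budget for the aggregator layer
		hashBytes             = 32              // every entry is keyed by a 32B hash
		accountBytes          = 14              // plain account payload
		liveSlotBytes         = 32              // set storage slot payload
		deadSlotBytes         = 0               // deleted storage slot payload
	)
	avgSlot := (liveSlotBytes + deadSlotBytes) / 2 // slots flip set/unset in lockstep -> ~16B
	avgPayload := (accountBytes + avgSlot) / 2     // ~15B averaged across accounts and slots
	avgEntry := avgPayload + hashBytes             // ~47B once the hash key is counted

	fmt.Println("average entry bytes:", avgEntry)                // 47
	fmt.Println("items at /47:", aggregatorMemoryLimit/avgEntry) // exact fit
	fmt.Println("items at /42:", aggregatorMemoryLimit/42)       // safety margin used in the diff
}
```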
@@ -269,13 +271,13 @@ func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) {
 		return dl.origin.AccountRLP(hash)
 	}
 	// The bloom filter hit, start poking in the internal maps
-	return dl.accountRLP(hash)
+	return dl.accountRLP(hash, 0)
 }

 // accountRLP is an internal version of AccountRLP that skips the bloom filter
 // checks and uses the internal maps to try and retrieve the data. It's meant
 // to be used if a higher layer's bloom filter hit already.
-func (dl *diffLayer) accountRLP(hash common.Hash) ([]byte, error) {
+func (dl *diffLayer) accountRLP(hash common.Hash, depth int) ([]byte, error) {
 	dl.lock.RLock()
 	defer dl.lock.RUnlock()
@@ -288,13 +290,18 @@ func (dl *diffLayer) accountRLP(hash common.Hash) ([]byte, error) {
 	// deleted, and is a different notion than an unknown account!
 	if data, ok := dl.accountData[hash]; ok {
 		snapshotDirtyAccountHitMeter.Mark(1)
-		snapshotDirtyAccountReadMeter.Mark(int64(len(data)))
+		snapshotDirtyAccountHitDepthHist.Update(int64(depth))
+		if n := len(data); n > 0 {
+			snapshotDirtyAccountReadMeter.Mark(int64(n))
+		} else {
+			snapshotDirtyAccountInexMeter.Mark(1)
+		}
 		snapshotBloomAccountTrueHitMeter.Mark(1)
 		return data, nil
 	}
 	// Account unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
-		return diff.accountRLP(hash)
+		return diff.accountRLP(hash, depth+1)
 	}
 	// Failed to resolve through diff layers, mark a bloom error and use the disk
 	snapshotBloomAccountFalseHitMeter.Mark(1)
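The account path now threads a depth counter through the recursive parent lookups and feeds it into a histogram, so a dirty hit also records how many diff layers had to be traversed, and empty values are counted as "inexistent" instead of zero-byte reads. A minimal sketch of that pattern follows, with a hypothetical layer type and fmt prints standing in for the real diffLayer and its meters.

```go
// Sketch of depth-threaded lookups through stacked diff layers.
package main

import "fmt"

type layer struct {
	parent *layer
	data   map[string][]byte
}

// lookup mirrors accountRLP: depth counts how many layers were traversed
// before the hit, and zero-length values are reported as inexistent rather
// than as a zero-byte read.
func (l *layer) lookup(key string, depth int) ([]byte, int, bool) {
	if data, ok := l.data[key]; ok {
		if len(data) == 0 {
			fmt.Println("inex hit at depth", depth) // inex meter + depth histogram in the real code
		} else {
			fmt.Println("read", len(data), "bytes at depth", depth)
		}
		return data, depth, true
	}
	if l.parent != nil {
		return l.parent.lookup(key, depth+1) // recurse with an incremented depth
	}
	return nil, depth, false // the real code falls through to the disk layer here
}

func main() {
	bottom := &layer{data: map[string][]byte{"a": []byte("rlp")}}
	top := &layer{parent: bottom, data: map[string][]byte{"b": {}}}

	top.lookup("b", 0) // hit in the top layer, empty value -> counted as inexistent
	top.lookup("a", 0) // resolved one layer down -> depth 1
}
```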
@@ -318,13 +325,13 @@ func (dl *diffLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
 		return dl.origin.Storage(accountHash, storageHash)
 	}
 	// The bloom filter hit, start poking in the internal maps
-	return dl.storage(accountHash, storageHash)
+	return dl.storage(accountHash, storageHash, 0)
 }

 // storage is an internal version of Storage that skips the bloom filter checks
 // and uses the internal maps to try and retrieve the data. It's meant to be
 // used if a higher layer's bloom filter hit already.
-func (dl *diffLayer) storage(accountHash, storageHash common.Hash) ([]byte, error) {
+func (dl *diffLayer) storage(accountHash, storageHash common.Hash, depth int) ([]byte, error) {
 	dl.lock.RLock()
 	defer dl.lock.RUnlock()
@@ -338,19 +345,26 @@ func (dl *diffLayer) storage(accountHash, storageHash common.Hash) ([]byte, error) {
 	if storage, ok := dl.storageData[accountHash]; ok {
 		if storage == nil {
 			snapshotDirtyStorageHitMeter.Mark(1)
+			snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+			snapshotDirtyStorageInexMeter.Mark(1)
 			snapshotBloomStorageTrueHitMeter.Mark(1)
 			return nil, nil
 		}
 		if data, ok := storage[storageHash]; ok {
 			snapshotDirtyStorageHitMeter.Mark(1)
-			snapshotDirtyStorageReadMeter.Mark(int64(len(data)))
+			snapshotDirtyStorageHitDepthHist.Update(int64(depth))
+			if n := len(data); n > 0 {
+				snapshotDirtyStorageReadMeter.Mark(int64(n))
+			} else {
+				snapshotDirtyStorageInexMeter.Mark(1)
+			}
 			snapshotBloomStorageTrueHitMeter.Mark(1)
 			return data, nil
 		}
 	}
 	// Storage slot unknown to this diff, resolve from parent
 	if diff, ok := dl.parent.(*diffLayer); ok {
-		return diff.storage(accountHash, storageHash)
+		return diff.storage(accountHash, storageHash, depth+1)
 	}
 	// Failed to resolve through diff layers, mark a bloom error and use the disk
 	snapshotBloomStorageFalseHitMeter.Mark(1)
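The storage path mirrors the account changes but has one extra case: a nil inner map marks an account whose entire storage was deleted, and that branch now also records the traversal depth and an "inex" hit. A small sketch of the two-level lookup; the storageData and lookup names are hypothetical stand-ins for the diff layer internals.

```go
// Sketch of the account -> slot -> value lookup with both deletion cases.
package main

import "fmt"

// storageData maps account hash -> (slot hash -> value); a nil inner map
// means the account's whole storage was wiped, a zero-length value means
// that single slot was deleted.
var storageData = map[string]map[string][]byte{
	"wiped":  nil,
	"normal": {"slot1": []byte{0x01}, "slot2": {}},
}

func lookup(account, slot string) {
	if storage, ok := storageData[account]; ok {
		if storage == nil {
			fmt.Println(account, slot, "-> inexistent (whole storage deleted)")
			return
		}
		if data, ok := storage[slot]; ok {
			if len(data) > 0 {
				fmt.Println(account, slot, "-> read", len(data), "bytes")
			} else {
				fmt.Println(account, slot, "-> inexistent (slot deleted)")
			}
			return
		}
	}
	fmt.Println(account, slot, "-> unknown to this layer, ask parent or disk")
}

func main() {
	lookup("wiped", "slot1")
	lookup("normal", "slot1")
	lookup("normal", "slot2")
	lookup("normal", "other")
}
```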

core/state/snapshot/disklayer.go
@@ -104,8 +104,11 @@ func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
 	dl.cache.Set(hash[:], blob)
 	snapshotCleanAccountMissMeter.Mark(1)
-	snapshotCleanAccountWriteMeter.Mark(int64(len(blob)))
+	if n := len(blob); n > 0 {
+		snapshotCleanAccountWriteMeter.Mark(int64(n))
+	} else {
+		snapshotCleanAccountInexMeter.Mark(1)
+	}
 	return blob, nil
 }
@@ -141,8 +144,11 @@ func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
 	dl.cache.Set(key, blob)
 	snapshotCleanStorageMissMeter.Mark(1)
-	snapshotCleanStorageWriteMeter.Mark(int64(len(blob)))
+	if n := len(blob); n > 0 {
+		snapshotCleanStorageWriteMeter.Mark(int64(n))
+	} else {
+		snapshotCleanStorageInexMeter.Mark(1)
+	}
 	return blob, nil
 }
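On the disk layer the clean cache still stores empty blobs, so repeated queries for absent entries stay cheap, but those writes no longer inflate the write meter as zero-byte entries; they are counted separately. A tiny sketch of that accounting split, where markWrite is a hypothetical helper rather than geth code.

```go
// Sketch of splitting clean-cache writes into "bytes written" vs "absence cached".
package main

import "fmt"

func markWrite(blob []byte) {
	if n := len(blob); n > 0 {
		fmt.Println("clean write:", n, "bytes") // snapshotClean*WriteMeter.Mark(int64(n)) in the diff
	} else {
		fmt.Println("clean inex: cached an absent entry") // snapshotClean*InexMeter.Mark(1) in the diff
	}
}

func main() {
	markWrite([]byte{0xc0}) // existing entry
	markWrite(nil)          // non-existent account or slot, still cached
}
```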

core/state/snapshot/snapshot.go
@@ -34,24 +34,31 @@ import (
 var (
 	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
 	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
+	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
 	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
 	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

 	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
 	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
+	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
 	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
 	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

 	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
 	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
+	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
 	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
 	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

 	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
 	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
+	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
 	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
 	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

+	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
+	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

 	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
 	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
 	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
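The new "inex" meters and the hit-depth histograms are built from the same go-ethereum metrics package as the existing counters. Below is a hedged sketch of how such a meter/histogram pair behaves once registered; the metric names and sampled values are made up for the example, and enabling collection via metrics.Enabled is assumed to match the geth version this diff targets (geth normally toggles it through its metrics flags).

```go
// Sketch: register a meter and an exponentially-decaying-sample histogram,
// feed them some lookups, then read them back.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	// Metrics are no-ops unless collection is enabled before registration.
	metrics.Enabled = true

	inexMeter := metrics.NewRegisteredMeter("example/inex", nil)
	depthHist := metrics.NewRegisteredHistogram("example/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	// Simulate a few lookups: two absences and hits at various diff-layer depths.
	inexMeter.Mark(1)
	inexMeter.Mark(1)
	for _, depth := range []int64{0, 0, 1, 3} {
		depthHist.Update(depth)
	}

	fmt.Println("inex count:", inexMeter.Count())
	fmt.Println("mean hit depth:", depthHist.Mean())
	fmt.Println("p95 hit depth:", depthHist.Percentile(0.95))
}
```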

core/state_prefetcher.go
@@ -54,6 +54,7 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
 		gaspool = new(GasPool).AddGas(block.GasLimit())
 	)
 	// Iterate over and process the individual transactions
+	byzantium := p.config.IsByzantium(block.Number())
 	for i, tx := range block.Transactions() {
 		// If block precaching was interrupted, abort
 		if interrupt != nil && atomic.LoadUint32(interrupt) == 1 {
@@ -64,9 +65,15 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c
 		if err := precacheTransaction(p.config, p.bc, nil, gaspool, statedb, header, tx, cfg); err != nil {
 			return // Ugh, something went horribly wrong, bail out
 		}
+		// If we're pre-byzantium, pre-load trie nodes for the intermediate root
+		if !byzantium {
+			statedb.IntermediateRoot(true)
+		}
 	}
-	// All transactions processed, finalize the block to force loading written-only trie paths
-	statedb.Finalise(true) // TODO(karalabe): should we run this on interrupt too?
+	// If we're post-byzantium, pre-load trie nodes for the final root hash
+	if byzantium {
+		statedb.IntermediateRoot(true)
+	}
 }

 // precacheTransaction attempts to apply a transaction to the given state database
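The prefetcher change hinges on where state roots appear: pre-Byzantium receipts carry an intermediate root after every transaction, so hashing the state per transaction warms exactly those trie nodes, while post-Byzantium only the final root is needed, hashed once after the loop in place of the earlier Finalise call. A stubbed sketch of that control flow; processTx and intermediateRoot are fakes, not the real statedb API.

```go
// Sketch of the per-transaction vs end-of-block root hashing split.
package main

import "fmt"

func prefetch(txs []string, byzantium bool) {
	for _, tx := range txs {
		processTx(tx)
		if !byzantium {
			intermediateRoot() // per-tx root: receipts reference it pre-Byzantium
		}
	}
	if byzantium {
		intermediateRoot() // only the final root matters post-Byzantium
	}
}

func processTx(tx string) { fmt.Println("precached", tx) }
func intermediateRoot()   { fmt.Println("hashed state, trie nodes pulled into cache") }

func main() {
	prefetch([]string{"tx1", "tx2"}, false) // pre-Byzantium: root after every tx
	prefetch([]string{"tx1", "tx2"}, true)  // post-Byzantium: root once at the end
}
```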
