triedb/pathdb: implement flat state iterator in pathdb

pull/30654/head
Gary Rong 2 weeks ago
parent dab10e18ff
commit 409da23e11
  1. triedb/pathdb/database.go (18 changed lines)
  2. triedb/pathdb/holdable_iterator.go (97 changed lines)
  3. triedb/pathdb/holdable_iterator_test.go (177 changed lines)
  4. triedb/pathdb/iterator.go (407 changed lines)
  5. triedb/pathdb/iterator_binary.go (290 changed lines)
  6. triedb/pathdb/iterator_fast.go (372 changed lines)
  7. triedb/pathdb/iterator_test.go (1047 changed lines)
  8. triedb/pathdb/reader.go (11 changed lines)
  9. triedb/pathdb/states.go (4 changed lines)

@@ -555,3 +555,21 @@ func (db *Database) StorageHistory(address common.Address, slot common.Hash, sta
func (db *Database) HistoryRange() (uint64, uint64, error) {
return historyRange(db.freezer)
}
// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (db *Database) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
//if gen := db.tree.bottom().generator; gen != nil && !gen.completed() {
// return nil, errNotConstructed
//}
return newFastAccountIterator(db, root, seek)
}
// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specified start position.
func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
//if gen := db.tree.bottom().generator; gen != nil && !gen.completed() {
// return nil, errNotConstructed
//}
return newFastStorageIterator(db, root, account, seek)
}
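For context, a minimal usage sketch of the two new entry points from inside the pathdb package (the function name, `root` and `accountHash` values are hypothetical; error handling is trimmed to the essentials and this is not part of the change):

func dumpFlatState(db *Database, root, accountHash common.Hash) error {
	// Walk every account in the flat state at the given root; the zero
	// hash seeks to the very first account.
	ait, err := db.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer ait.Release()
	for ait.Next() {
		_ = ait.Hash()    // account hash
		_ = ait.Account() // RLP-encoded slim account, not a copy, don't modify
	}
	if err := ait.Error(); err != nil {
		return err
	}
	// Walk the storage slots of a single account at the same root.
	sit, err := db.StorageIterator(root, accountHash, common.Hash{})
	if err != nil {
		return err
	}
	defer sit.Release()
	for sit.Next() {
		_ = sit.Hash() // slot hash
		_ = sit.Slot() // raw slot value, not a copy, don't modify
	}
	return sit.Error()
}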

@@ -0,0 +1,97 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
)
// holdableIterator is a wrapper of underlying database iterator. It extends
// the basic iterator interface by adding Hold which can hold the element
// locally where the iterator is currently located and serve it up next time.
type holdableIterator struct {
it ethdb.Iterator
key []byte
val []byte
atHeld bool
}
// newHoldableIterator initializes the holdableIterator with the given iterator.
func newHoldableIterator(it ethdb.Iterator) *holdableIterator {
return &holdableIterator{it: it}
}
// Hold holds the element locally where the iterator is currently located which
// can be served up next time.
func (it *holdableIterator) Hold() {
if it.it.Key() == nil {
return // nothing to hold
}
it.key = common.CopyBytes(it.it.Key())
it.val = common.CopyBytes(it.it.Value())
it.atHeld = false
}
// Next moves the iterator to the next key/value pair. It returns false if the
// iterator is exhausted.
func (it *holdableIterator) Next() bool {
if !it.atHeld && it.key != nil {
it.atHeld = true
} else if it.atHeld {
it.atHeld = false
it.key = nil
it.val = nil
}
if it.key != nil {
return true // shifted to locally held value
}
return it.it.Next()
}
// Error returns any accumulated error. Exhausting all the key/value pairs
// is not considered to be an error.
func (it *holdableIterator) Error() error { return it.it.Error() }
// Release releases associated resources. Release should always succeed and can
// be called multiple times without causing error.
func (it *holdableIterator) Release() {
it.atHeld = false
it.key = nil
it.val = nil
it.it.Release()
}
// Key returns the key of the current key/value pair, or nil if done. The caller
// should not modify the contents of the returned slice, and its contents may
// change on the next call to Next.
func (it *holdableIterator) Key() []byte {
if it.key != nil {
return it.key
}
return it.it.Key()
}
// Value returns the value of the current key/value pair, or nil if done. The
// caller should not modify the contents of the returned slice, and its contents
// may change on the next call to Next.
func (it *holdableIterator) Value() []byte {
if it.val != nil {
return it.val
}
return it.it.Value()
}
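A small sketch of the Hold contract from inside the package, using the in-memory store from ethdb/memorydb (the function name, keys and values are illustrative; the test file below exercises the same behaviour more thoroughly):

func holdExample() {
	// Hold pins the element the iterator currently sits on, so the next
	// call to Next serves it again instead of advancing.
	mdb := memorydb.New()
	_ = mdb.Put([]byte("k1"), []byte("v1"))
	_ = mdb.Put([]byte("k2"), []byte("v2"))

	it := newHoldableIterator(mdb.NewIterator(nil, nil))
	it.Hold()    // no current element yet, nothing to hold
	it.Next()    // -> k1
	it.Hold()    // remember k1
	it.Next()    // -> k1 again, served from the held copy
	it.Next()    // -> k2
	it.Release()
}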

@@ -0,0 +1,177 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/ethdb/memorydb"
)
func TestIteratorHold(t *testing.T) {
// Create the key-value data store
var (
content = map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
order = []string{"k1", "k2", "k3"}
db = rawdb.NewMemoryDatabase()
)
for key, val := range content {
if err := db.Put([]byte(key), []byte(val)); err != nil {
t.Fatalf("failed to insert item %s:%s into database: %v", key, val, err)
}
}
// Iterate over the database with the given configs and verify the results
it, idx := newHoldableIterator(db.NewIterator(nil, nil)), 0
// Nothing should be affected by calling Hold on a non-initialized iterator
it.Hold()
for it.Next() {
if len(content) <= idx {
t.Errorf("more items than expected: checking idx=%d (key %q), expecting len=%d", idx, it.Key(), len(order))
break
}
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
// Should be safe to call Hold multiple times
it.Hold()
it.Hold()
// Shift iterator to the held element
it.Next()
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
// Hold/Next combo should always work
it.Hold()
it.Next()
if !bytes.Equal(it.Key(), []byte(order[idx])) {
t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
}
if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
}
idx++
}
if err := it.Error(); err != nil {
t.Errorf("iteration failed: %v", err)
}
if idx != len(order) {
t.Errorf("iteration terminated prematurely: have %d, want %d", idx, len(order))
}
db.Close()
}
func TestReopenIterator(t *testing.T) {
var (
content = map[common.Hash]string{
common.HexToHash("a1"): "v1",
common.HexToHash("a2"): "v2",
common.HexToHash("a3"): "v3",
common.HexToHash("a4"): "v4",
common.HexToHash("a5"): "v5",
common.HexToHash("a6"): "v6",
}
order = []common.Hash{
common.HexToHash("a1"),
common.HexToHash("a2"),
common.HexToHash("a3"),
common.HexToHash("a4"),
common.HexToHash("a5"),
common.HexToHash("a6"),
}
db = rawdb.NewMemoryDatabase()
reopen = func(db ethdb.KeyValueStore, iter *holdableIterator) *holdableIterator {
if !iter.Next() {
iter.Release()
return newHoldableIterator(memorydb.New().NewIterator(nil, nil))
}
next := iter.Key()
iter.Release()
return newHoldableIterator(db.NewIterator(rawdb.SnapshotAccountPrefix, next[1:]))
}
_ = reopen
)
for key, val := range content {
rawdb.WriteAccountSnapshot(db, key, []byte(val))
}
checkVal := func(it *holdableIterator, index int) {
if !bytes.Equal(it.Key(), append(rawdb.SnapshotAccountPrefix, order[index].Bytes()...)) {
t.Fatalf("Unexpected data entry key, want %v got %v", order[index], it.Key())
}
if !bytes.Equal(it.Value(), []byte(content[order[index]])) {
t.Fatalf("Unexpected data entry key, want %v got %v", []byte(content[order[index]]), it.Value())
}
}
// Iterate over the database with the given configs and verify the results
dbIter := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
iter, idx := newHoldableIterator(rawdb.NewKeyLengthIterator(dbIter, 1+common.HashLength)), -1
idx++
iter.Next()
checkVal(iter, idx)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen twice
iter = reopen(db, iter)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen iterator with held value
iter.Next()
iter.Hold()
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// reopen twice iterator with held value
iter.Next()
iter.Hold()
iter = reopen(db, iter)
iter = reopen(db, iter)
idx++
iter.Next()
checkVal(iter, idx)
// shift to the end and reopen
iter.Next() // the end
iter = reopen(db, iter)
iter.Next()
if iter.Key() != nil {
t.Fatal("Unexpected iterated entry")
}
}

@@ -0,0 +1,407 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"fmt"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
)
// Iterator is an iterator to step over all the accounts or the specific
// storage in a snapshot which may or may not be composed of multiple layers.
type Iterator interface {
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
Next() bool
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
Error() error
// Hash returns the hash of the account or storage slot the iterator is
// currently at.
Hash() common.Hash
// Release releases associated resources. Release should always succeed and
// can be called multiple times without causing error.
Release()
}
// AccountIterator is an iterator to step over all the accounts in a snapshot,
// which may or may not be composed of multiple layers.
type AccountIterator interface {
Iterator
// Account returns the RLP encoded slim account the iterator is currently at.
// An error will be returned if the iterator becomes invalid.
Account() []byte
}
// StorageIterator is an iterator to step over the specific storage in a snapshot,
// which may or may not be composed of multiple layers.
type StorageIterator interface {
Iterator
// Slot returns the storage slot the iterator is currently at. An error will
// be returned if the iterator becomes invalid.
Slot() []byte
}
// diffAccountIterator is an account iterator that steps over the accounts (both
// live and deleted) contained within a state set. Higher order iterators will
// use the deleted accounts to skip deeper iterators.
//
// This iterator could be created from the diff layer or the disk layer (the
// aggregated state buffer).
type diffAccountIterator struct {
// curHash is the current hash the iterator is positioned on. The field is
// explicitly tracked since the referenced state set might go stale after
// the iterator was positioned and we don't want to fail accessing the old
// hash as long as the iterator is not touched any more.
curHash common.Hash
states *stateSet // Live state set to retrieve accounts from
stale func() bool // Signal if the referenced state set is stale
keys []common.Hash // Keys left in the layer to iterate
fail error // Any failures encountered (stale)
}
// newDiffAccountIterator creates an account iterator over the given state set.
func newDiffAccountIterator(seek common.Hash, states *stateSet, stale func() bool) AccountIterator {
// Seek out the requested starting account
hashes := states.accountList()
index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0
})
// Assemble and return the already seeked iterator
return &diffAccountIterator{
states: states,
stale: stale,
keys: hashes[index:],
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffAccountIterator) Next() bool {
// If the iterator was already stale, consider it a programmer error. Although
// we could just return false here, triggering this path would probably mean
// somebody forgot to check for Error, so let's blow up instead of undefined
// behavior that's hard to debug.
if it.fail != nil {
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
}
// Stop iterating if all keys were exhausted
if len(it.keys) == 0 {
return false
}
if it.stale != nil && it.stale() {
it.fail, it.keys = errSnapshotStale, nil
return false
}
// Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0]
// key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:]
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diffAccountIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diffAccountIterator) Hash() common.Hash {
return it.curHash
}
// Account returns the RLP encoded slim account the iterator is currently at.
// This method may _fail_ if the associated state goes stale between the call
// to Next and Account. That type of error will set it.fail.
//
// This method assumes that state set modifications do not delete elements from
// the account mapping (writing nil into it is fine though), and will panic
// if elements have been deleted.
//
// Note the returned account is not a copy, please don't modify it.
func (it *diffAccountIterator) Account() []byte {
blob, ok := it.states.account(it.curHash)
if !ok {
panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
}
if it.stale != nil && it.stale() {
it.fail, it.keys = errSnapshotStale, nil
}
return blob
}
// Release is a noop for diff account iterators as there are no held resources.
func (it *diffAccountIterator) Release() {}
// diskAccountIterator is an account iterator that steps over the persistent
// accounts within the database.
//
// To simplify, the staleness of the persistent state is not tracked. The disk
// iterator is not intended to be used alone. It should always be wrapped with
// a diff iterator, as the bottom-most disk layer uses both the in-memory
// aggregated buffer and the persistent disk layer as data sources. The staleness
// of the diff iterator is sufficient to invalidate the iterator pair.
type diskAccountIterator struct {
it ethdb.Iterator
}
// newDiskAccountIterator creates an account iterator over the persistent state.
func newDiskAccountIterator(db ethdb.KeyValueStore, seek common.Hash) AccountIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskAccountIterator{
it: db.NewIterator(rawdb.SnapshotAccountPrefix, pos),
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskAccountIterator) Next() bool {
// If the iterator was already exhausted, don't bother
if it.it == nil {
return false
}
// Try to advance the iterator and release it if we reached the end
for {
if !it.it.Next() {
it.it.Release()
it.it = nil
return false
}
if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
break
}
}
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diskAccountIterator) Error() error {
if it.it == nil {
return nil // Iterator is exhausted and released
}
return it.it.Error()
}
// Hash returns the hash of the account the iterator is currently at.
func (it *diskAccountIterator) Hash() common.Hash {
return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Account returns the RLP encoded slim account the iterator is currently at.
func (it *diskAccountIterator) Account() []byte {
return it.it.Value()
}
// Release releases the database snapshot held during iteration.
func (it *diskAccountIterator) Release() {
// The iterator is auto-released on exhaustion, so make sure it's still alive
if it.it != nil {
it.it.Release()
it.it = nil
}
}
// diffStorageIterator is a storage iterator that steps over the specific storage
// (both live and deleted) contained within a state set. Higher order iterators
// will use the deleted slot to skip deeper iterators.
//
// This iterator could be created from the diff layer or the disk layer (the
// aggregated state buffer).
type diffStorageIterator struct {
// curHash is the current hash the iterator is positioned on. The field is
// explicitly tracked since the referenced state set might go stale after
// the iterator was positioned and we don't want to fail accessing the old
// hash as long as the iterator is not touched any more.
curHash common.Hash
account common.Hash
states *stateSet // Live state set to retrieve storage slots from
stale func() bool // Signal if the referenced state set is stale
keys []common.Hash // Keys left in the layer to iterate
fail error // Any failures encountered (stale)
}
// newDiffStorageIterator creates a storage iterator over a single diff layer.
// Besides the storage iterator itself, an additional "destructed" flag is
// returned. If it's true, the whole storage was destructed in this layer
// (and possibly recreated), so deeper layers don't need to be consulted for
// storage retrieval.
func newDiffStorageIterator(account common.Hash, seek common.Hash, states *stateSet, stale func() bool) (StorageIterator, bool) {
// Create the storage iterator for this account even if it's marked
// as destructed; the iterator then covers the newly created storage
// which simply shares the address of the deleted one.
hashes, destructed := states.storageList(account)
index := sort.Search(len(hashes), func(i int) bool {
return bytes.Compare(seek[:], hashes[i][:]) <= 0
})
// Assemble and return the already seeked iterator
return &diffStorageIterator{
states: states,
stale: stale,
account: account,
keys: hashes[index:],
}, destructed
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diffStorageIterator) Next() bool {
// If the iterator was already stale, consider it a programmer error. Although
// we could just return false here, triggering this path would probably mean
// somebody forgot to check for Error, so let's blow up instead of undefined
// behavior that's hard to debug.
if it.fail != nil {
panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
}
// Stop iterating if all keys were exhausted
if len(it.keys) == 0 {
return false
}
if it.stale != nil && it.stale() {
it.fail, it.keys = errSnapshotStale, nil
return false
}
// Iterator seems to be still alive, retrieve and cache the live hash
it.curHash = it.keys[0]
// key cached, shift the iterator and notify the user of success
it.keys = it.keys[1:]
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diffStorageIterator) Error() error {
return it.fail
}
// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diffStorageIterator) Hash() common.Hash {
return it.curHash
}
// Slot returns the raw storage slot value the iterator is currently at.
// This method may _fail_ if the associated state set is modified between
// the call to Next and Slot. That type of error will set it.fail.
// This method assumes that state set modifications do not delete elements
// from the storage mapping (writing nil into it is fine though), and will
// panic if elements have been deleted.
//
// Note the returned slot is not a copy, please don't modify it.
func (it *diffStorageIterator) Slot() []byte {
storage, ok := it.states.storage(it.account, it.curHash)
if !ok {
panic(fmt.Sprintf("iterator referenced non-existent storage: %x %x", it.account, it.curHash))
}
if it.stale != nil && it.stale() {
it.fail, it.keys = errSnapshotStale, nil
}
return storage
}
// Release is a noop for diff storage iterators as there are no held resources.
func (it *diffStorageIterator) Release() {}
// diskStorageIterator is a storage iterator that steps over the persistent
// storage slots contained within the database.
//
// To simplify, the staleness of the persistent state is not tracked. The disk
// iterator is not intended to be used alone. It should always be wrapped with
// a diff iterator, as the bottom-most disk layer uses both the in-memory
// aggregated buffer and the persistent disk layer as data sources. The staleness
// of the diff iterator is sufficient to invalidate the iterator pair.
type diskStorageIterator struct {
account common.Hash
it ethdb.Iterator
}
// newDiskStorageIterator creates a storage iterator over the persistent state.
// If the whole storage was destructed, all its entries have already been
// deleted from the disk layer, so no "destructed" flag needs to be returned
// here.
func newDiskStorageIterator(db ethdb.KeyValueStore, account common.Hash, seek common.Hash) StorageIterator {
pos := common.TrimRightZeroes(seek[:])
return &diskStorageIterator{
account: account,
it: db.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
}
}
// Next steps the iterator forward one element, returning false if exhausted.
func (it *diskStorageIterator) Next() bool {
// If the iterator was already exhausted, don't bother
if it.it == nil {
return false
}
// Try to advance the iterator and release it if we reached the end
for {
if !it.it.Next() {
it.it.Release()
it.it = nil
return false
}
if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
break
}
}
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *diskStorageIterator) Error() error {
if it.it == nil {
return nil // Iterator is exhausted and released
}
return it.it.Error()
}
// Hash returns the hash of the storage slot the iterator is currently at.
func (it *diskStorageIterator) Hash() common.Hash {
return common.BytesToHash(it.it.Key()) // The prefix will be truncated
}
// Slot returns the raw storage slot content the iterator is currently at.
func (it *diskStorageIterator) Slot() []byte {
return it.it.Value()
}
// Release releases the database snapshot held during iteration.
func (it *diskStorageIterator) Release() {
// The iterator is auto-released on exhaustion, so make sure it's still alive
if it.it != nil {
it.it.Release()
it.it = nil
}
}
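For reference, the flat-state key schema the disk iterators above rely on, and how the seek position is derived (all identifiers come from the code above; the helper name is illustrative only, not part of the change):

// Account entry key: rawdb.SnapshotAccountPrefix ++ accountHash
// Storage entry key: rawdb.SnapshotStoragePrefix ++ accountHash ++ slotHash
// The Next loops above filter on exactly these key lengths to skip any
// unrelated records that happen to share the prefix.
func openDiskIterators(db ethdb.KeyValueStore, account, seek common.Hash) (ethdb.Iterator, ethdb.Iterator) {
	// Trailing zero bytes are trimmed from the seek hash, so the zero hash
	// starts iteration at the first entry under the prefix instead of
	// seeking to a full 32-byte zero key.
	pos := common.TrimRightZeroes(seek[:])
	accIt := db.NewIterator(rawdb.SnapshotAccountPrefix, pos)
	storIt := db.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos)
	return accIt, storIt
}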

@@ -0,0 +1,290 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"github.com/ethereum/go-ethereum/common"
)
// binaryIterator is a simplistic iterator to step over the accounts or storage
// in a snapshot, which may or may not be composed of multiple layers. It is
// slow performance-wise and is meant only for cross-validating the fast iterator.
type binaryIterator struct {
a Iterator
b Iterator
aDone bool
bDone bool
k common.Hash
account common.Hash
fail error
}
// initBinaryAccountIterator creates a simplistic iterator to step over all the
// accounts in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryAccountIterator` as the API.
func (dl *diskLayer) initBinaryAccountIterator() *binaryIterator {
// Create two iterators for state buffer and the persistent state in disk
// respectively and combine them as a binary iterator.
l := &binaryIterator{
a: newDiffAccountIterator(common.Hash{}, dl.buffer.states, dl.isStale),
b: newDiskAccountIterator(dl.db.diskdb, common.Hash{}),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryAccountIterator creates a simplistic iterator to step over all the
// accounts in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryAccountIterator` as the API.
func (dl *diffLayer) initBinaryAccountIterator() *binaryIterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
l := &binaryIterator{
a: newDiffAccountIterator(common.Hash{}, dl.states.stateSet, nil),
b: dl.parent.(*diskLayer).initBinaryAccountIterator(),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
l := &binaryIterator{
a: newDiffAccountIterator(common.Hash{}, dl.states.stateSet, nil),
b: parent.initBinaryAccountIterator(),
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryStorageIterator creates a simplistic iterator to step over all the
// storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diskLayer) initBinaryStorageIterator(account common.Hash) *binaryIterator {
// Create two iterators for state buffer and the persistent state in disk
// respectively and combine them as a binary iterator.
//
// Mark the iterator b as exhausted if the corresponding account is destructed
// in the state buffer.
a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.buffer.states, dl.isStale)
if destructed {
l := &binaryIterator{
a: a,
account: account,
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
l := &binaryIterator{
a: a,
b: newDiskStorageIterator(dl.db.diskdb, account, common.Hash{}),
account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// initBinaryStorageIterator creates a simplistic iterator to step over all the
// storage slots in a slow, but easily verifiable way. Note this function is used
// for initialization, use `newBinaryStorageIterator` as the API.
func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) *binaryIterator {
parent, ok := dl.parent.(*diffLayer)
if !ok {
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
//
// Diff layer has no stale notion, the callback for checking staleness
// is nil here.
a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.states.stateSet, nil)
if destructed {
l := &binaryIterator{
a: a,
account: account,
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
l := &binaryIterator{
a: a,
b: dl.parent.(*diskLayer).initBinaryStorageIterator(account),
account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// If the storage in this layer is already destructed, discard all
// deeper layers but still return a valid single-branch iterator.
//
// Diff layer has no stale notion, the callback for checking staleness
// is nil here.
a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.states.stateSet, nil)
if destructed {
l := &binaryIterator{
a: a,
account: account,
}
l.aDone = !l.a.Next()
l.bDone = true
return l
}
l := &binaryIterator{
a: a,
b: parent.initBinaryStorageIterator(account),
account: account,
}
l.aDone = !l.a.Next()
l.bDone = !l.b.Next()
return l
}
// Next steps the iterator forward one element, returning false if exhausted,
// or an error if iteration failed for some reason (e.g. root being iterated
// becomes stale and garbage collected).
func (it *binaryIterator) Next() bool {
if it.aDone && it.bDone {
return false
}
first:
if it.aDone {
it.k = it.b.Hash()
it.bDone = !it.b.Next()
return true
}
if it.bDone {
it.k = it.a.Hash()
it.aDone = !it.a.Next()
return true
}
nextA, nextB := it.a.Hash(), it.b.Hash()
if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
it.aDone = !it.a.Next()
it.k = nextA
return true
} else if diff == 0 {
// Now we need to advance one of them
it.aDone = !it.a.Next()
goto first
}
it.bDone = !it.b.Next()
it.k = nextB
return true
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (it *binaryIterator) Error() error {
return it.fail
}
// Hash returns the hash of the account or storage slot the iterator is currently at.
func (it *binaryIterator) Hash() common.Hash {
return it.k
}
// Release recursively releases all the iterators in the stack.
func (it *binaryIterator) Release() {
it.a.Release()
it.b.Release()
}
// accountBinaryIterator is a wrapper around a binary iterator that adds functionality
// to retrieve account data from the associated layer at the current position.
type accountBinaryIterator struct {
*binaryIterator
layer layer
}
// newBinaryAccountIterator creates a simplistic account iterator to step over
// all the accounts in a slow, but easily verifiable way.
//
//nolint:all
func (dl *diskLayer) newBinaryAccountIterator() AccountIterator {
return &accountBinaryIterator{
binaryIterator: dl.initBinaryAccountIterator(),
layer: dl,
}
}
// newBinaryAccountIterator creates a simplistic account iterator to step over
// all the accounts in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
return &accountBinaryIterator{
binaryIterator: dl.initBinaryAccountIterator(),
layer: dl,
}
}
// Account returns the RLP encoded slim account the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned account is not a copy, please don't modify it.
func (it *accountBinaryIterator) Account() []byte {
blob, err := it.layer.account(it.k, 0)
if err != nil {
it.fail = err
return nil
}
return blob
}
// storageBinaryIterator is a wrapper around a binary iterator that adds functionality
// to retrieve storage slot data from the associated layer at the current position.
type storageBinaryIterator struct {
*binaryIterator
layer layer
}
// newBinaryStorageIterator creates a simplistic storage iterator to step over
// all the storage slots in a slow, but easily verifiable way.
//
//nolint:all
func (dl *diskLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
return &storageBinaryIterator{
binaryIterator: dl.initBinaryStorageIterator(account),
layer: dl,
}
}
// newBinaryStorageIterator creates a simplistic storage iterator to step over
// all the storage slots in a slow, but easily verifiable way.
func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
return &storageBinaryIterator{
binaryIterator: dl.initBinaryStorageIterator(account),
layer: dl,
}
}
// Slot returns the raw storage slot data the iterator is currently at, or
// nil if the iterated snapshot stack became stale (you can check Error after
// to see if it failed or not).
//
// Note the returned slot is not a copy, please don't modify it.
func (it *storageBinaryIterator) Slot() []byte {
blob, err := it.layer.storage(it.account, it.k, 0)
if err != nil {
it.fail = err
return nil
}
return blob
}
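The binary iterator exists purely to cross-check the fast iterator; a plausible shape for such a check inside the package is sketched below (the real comparisons live in the suppressed iterator_test.go, so the function name is hypothetical, `t`, `db` and `root` are assumed, and `root` is assumed to reference a diff layer):

func crossValidateAccounts(t *testing.T, db *Database, root common.Hash) {
	// Walk a fast account iterator and a binary one over the same state in
	// lockstep and require identical hash sequences.
	fast, err := newFastAccountIterator(db, root, common.Hash{})
	if err != nil {
		t.Fatal(err)
	}
	defer fast.Release()

	slow := db.tree.get(root).(*diffLayer).newBinaryAccountIterator()
	defer slow.Release()

	for fast.Next() {
		if !slow.Next() {
			t.Fatal("fast iterator returned more accounts than the binary one")
		}
		if fast.Hash() != slow.Hash() {
			t.Fatalf("hash mismatch: fast %x, binary %x", fast.Hash(), slow.Hash())
		}
	}
	if slow.Next() {
		t.Fatal("binary iterator returned more accounts than the fast one")
	}
}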

@@ -0,0 +1,372 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package pathdb
import (
"bytes"
"fmt"
"slices"
"sort"
"github.com/ethereum/go-ethereum/common"
)
// weightedIterator is an iterator with an assigned weight. It is used to prioritise
// which account or storage slot is the correct one if multiple iterators find the
// same one (modified in multiple consecutive blocks).
type weightedIterator struct {
it Iterator
priority int
}
func (it *weightedIterator) Cmp(other *weightedIterator) int {
// Order the iterators primarily by the account hashes
hashI := it.it.Hash()
hashJ := other.it.Hash()
switch bytes.Compare(hashI[:], hashJ[:]) {
case -1:
return -1
case 1:
return 1
}
// Same account/storage-slot in multiple layers, split by priority
if it.priority < other.priority {
return -1
}
if it.priority > other.priority {
return 1
}
return 0
}
// fastIterator is a more optimized multi-layer iterator which maintains a
// direct mapping of all iterators leading down to the bottom layer.
type fastIterator struct {
db *Database // Database to reinitialize stale sub-iterators with
root common.Hash // Root hash to reinitialize stale sub-iterators through
curAccount []byte
curSlot []byte
iterators []*weightedIterator
initiated bool
account bool
fail error
}
// newFastIterator creates a new hierarchical account or storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
current := db.tree.get(root)
if current == nil {
return nil, fmt.Errorf("unknown snapshot: %x", root)
}
fi := &fastIterator{
db: db,
root: root,
account: accountIterator,
}
loop:
for depth := 0; current != nil; depth++ {
if accountIterator {
switch dl := current.(type) {
case *diskLayer:
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiffAccountIterator(seek, dl.buffer.states, dl.isStale),
priority: depth,
})
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiskAccountIterator(dl.db.diskdb, seek),
priority: depth + 1,
})
case *diffLayer:
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiffAccountIterator(seek, dl.states.stateSet, nil),
priority: depth,
})
}
} else {
// If the whole storage is destructed in this layer, don't bother
// with deeper layers anymore. But we should still keep the iterator
// for this layer, since it can contain some valid slots which belong
// to the re-created account.
switch dl := current.(type) {
case *diskLayer:
it, destructed := newDiffStorageIterator(account, seek, dl.buffer.states, dl.isStale)
fi.iterators = append(fi.iterators, &weightedIterator{
it: it,
priority: depth,
})
if destructed {
break loop
}
fi.iterators = append(fi.iterators, &weightedIterator{
it: newDiskStorageIterator(dl.db.diskdb, account, seek),
priority: depth + 1,
})
case *diffLayer:
it, destructed := newDiffStorageIterator(account, seek, dl.states.stateSet, nil)
fi.iterators = append(fi.iterators, &weightedIterator{
it: it,
priority: depth,
})
if destructed {
break loop
}
}
}
current = current.parentLayer()
}
fi.init()
return fi, nil
}
// init walks over all the iterators and resolves any clashes between them, after
// which it prepares the stack for step-by-step iteration.
func (fi *fastIterator) init() {
// Track which account hashes the iterators are positioned on
var positioned = make(map[common.Hash]int)
// Position all iterators and track how many remain live
for i := 0; i < len(fi.iterators); i++ {
// Retrieve the first element and if it clashes with a previous iterator,
// advance either the current one or the old one. Repeat until nothing is
// clashing any more.
it := fi.iterators[i]
for {
// If the iterator is exhausted, drop it off the end
if !it.it.Next() {
it.it.Release()
last := len(fi.iterators) - 1
fi.iterators[i] = fi.iterators[last]
fi.iterators[last] = nil
fi.iterators = fi.iterators[:last]
i--
break
}
// The iterator is still alive, check for collisions with previous ones
hash := it.it.Hash()
if other, exist := positioned[hash]; !exist {
positioned[hash] = i
break
} else {
// Iterators collide, one needs to be progressed, use priority to
// determine which.
//
// This whole else-block can be avoided, if we instead
// do an initial priority-sort of the iterators. If we do that,
// then we'll only wind up here if a lower-priority (preferred) iterator
// has the same value, and then we will always just continue.
// However, it costs an extra sort, so it's probably not better
if fi.iterators[other].priority < it.priority {
// The 'it' should be progressed
continue
} else {
// The 'other' should be progressed, swap them
it = fi.iterators[other]
fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other]
continue
}
}
}
}
// Re-sort the entire list
slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) })
fi.initiated = false
}
// Next steps the iterator forward one element, returning false if exhausted.
func (fi *fastIterator) Next() bool {
if len(fi.iterators) == 0 {
return false
}
if !fi.initiated {
// Don't forward first time -- we had to 'Next' once in order to
// do the sorting already
fi.initiated = true
if fi.account {
fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
} else {
fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
}
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false
}
if fi.curAccount != nil || fi.curSlot != nil {
return true
}
// Implicit else: we've hit a nil-account or nil-slot, and need to
// fall through to the loop below to land on something non-nil
}
// If an account or a slot is deleted in one of the layers, the key will
// still be there, but the actual value will be nil. However, the iterator
// should not export nil-values (but instead simply omit the key), so we
// need to loop here until we either
// - get a non-nil value,
// - hit an error,
// - or exhaust the iterator
for {
if !fi.next(0) {
return false // exhausted
}
if fi.account {
fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
} else {
fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
}
if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
fi.fail = innerErr
return false // error
}
if fi.curAccount != nil || fi.curSlot != nil {
break // non-nil value found
}
}
return true
}
// next handles the next operation internally and should be invoked when we know
// that two elements in the list may have the same value.
//
// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
// invoke next(3), which will call Next on elem 3 (the second '5') and will
// cascade along the list, applying the same operation if needed.
func (fi *fastIterator) next(idx int) bool {
// If this particular iterator got exhausted, remove it and return true (the
// next one is surely not exhausted yet, otherwise it would have been removed
// already).
if it := fi.iterators[idx].it; !it.Next() {
it.Release()
fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
return len(fi.iterators) > 0
}
// If there's no one left to cascade into, return
if idx == len(fi.iterators)-1 {
return true
}
// We next-ed the iterator at 'idx', now we may have to re-sort that element
var (
cur, next = fi.iterators[idx], fi.iterators[idx+1]
curHash, nextHash = cur.it.Hash(), next.it.Hash()
)
if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
// It is still in correct place
return true
} else if diff == 0 && cur.priority < next.priority {
// So still in correct place, but we need to iterate on the next
fi.next(idx + 1)
return true
}
// At this point, the iterator is in the wrong location, but the remaining
// list is sorted. Find out where to move the item.
clash := -1
index := sort.Search(len(fi.iterators), func(n int) bool {
// The iterator always advances forward, so anything before the old slot
// is known to be behind us, so just skip them altogether. This actually
// is an important clause since the sort order got invalidated.
if n < idx {
return false
}
if n == len(fi.iterators)-1 {
// Can always place an elem last
return true
}
nextHash := fi.iterators[n+1].it.Hash()
if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
return true
} else if diff > 0 {
return false
}
// The elem we're placing it next to has the same value,
// so whichever winds up on n+1 will need further iteration
clash = n + 1
return cur.priority < fi.iterators[n+1].priority
})
fi.move(idx, index)
if clash != -1 {
fi.next(clash)
}
return true
}
// move advances an iterator to another position in the list.
func (fi *fastIterator) move(index, newpos int) {
elem := fi.iterators[index]
copy(fi.iterators[index:], fi.iterators[index+1:newpos+1])
fi.iterators[newpos] = elem
}
// Error returns any failure that occurred during iteration, which might have
// caused a premature iteration exit (e.g. snapshot stack becoming stale).
func (fi *fastIterator) Error() error {
return fi.fail
}
// Hash returns the hash of the current element the iterator is at.
func (fi *fastIterator) Hash() common.Hash {
return fi.iterators[0].it.Hash()
}
// Account returns the current account blob.
// Note the returned account is not a copy, please don't modify it.
func (fi *fastIterator) Account() []byte {
return fi.curAccount
}
// Slot returns the current storage slot.
// Note the returned slot is not a copy, please don't modify it.
func (fi *fastIterator) Slot() []byte {
return fi.curSlot
}
// Release iterates over all the remaining live layer iterators and releases each
// of them individually.
func (fi *fastIterator) Release() {
for _, it := range fi.iterators {
it.it.Release()
}
fi.iterators = nil
}
// Debug is a convenience helper during testing.
func (fi *fastIterator) Debug() {
for _, it := range fi.iterators {
fmt.Printf("[p=%v v=%v] ", it.priority, it.it.Hash()[0])
}
fmt.Println()
}
// newFastAccountIterator creates a new hierarchical account iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastAccountIterator(db *Database, root common.Hash, seek common.Hash) (AccountIterator, error) {
return newFastIterator(db, root, common.Hash{}, seek, true)
}
// newFastStorageIterator creates a new hierarchical storage iterator with one
// element per diff layer. The returned combo iterator can be used to walk over
// the entire snapshot diff stack simultaneously.
func newFastStorageIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
return newFastIterator(db, root, account, seek, false)
}
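To make the clash resolution in init and next concrete: when several layers hold the same hash, the lowest priority value (the shallowest layer) wins and the losing iterators are advanced past the duplicate. A self-contained toy of the same rule, purely illustrative and not part of the change:

func mergeByPriority(layers [][]string) []string {
	// mergeByPriority merges per-layer sorted key lists, index 0 being the
	// shallowest (highest priority) layer. Duplicate keys are emitted once,
	// taken from the highest priority layer that holds them.
	var (
		out  []string
		seen = make(map[string]bool)
		idx  = make([]int, len(layers))
	)
	for {
		best, bestLayer := "", -1
		for i, keys := range layers {
			// Skip keys already emitted on behalf of a shallower layer.
			for idx[i] < len(keys) && seen[keys[idx[i]]] {
				idx[i]++
			}
			// On equal heads the earlier (shallower) layer wins, because
			// the comparison below is strictly "smaller than".
			if idx[i] < len(keys) && (bestLayer == -1 || keys[idx[i]] < best) {
				best, bestLayer = keys[idx[i]], i
			}
		}
		if bestLayer == -1 {
			return out // every layer exhausted
		}
		seen[best] = true
		out = append(out, best)
		idx[bestLayer]++
	}
}

For example, mergeByPriority([][]string{{"a2", "a5"}, {"a3", "a5", "a8"}}) returns [a2 a3 a5 a8], with the duplicate a5 resolved in favour of the first (shallower) layer.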

File diff suppressed because it is too large (triedb/pathdb/iterator_test.go).

@@ -86,6 +86,17 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
return blob, nil
}
// AccountRLP directly retrieves the account associated with a particular hash.
// An error will be returned if the read operation exits abnormally. Specifically,
// if the layer is already stale.
//
// Note:
// - the returned account data is not a copy, please don't modify it
// - no error will be returned if the requested account is not found in the database
func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
return r.layer.account(hash, 0)
}
// Account directly retrieves the account associated with a particular hash in
// the slim data format. An error will be returned if the read operation exits
// abnormally. Specifically, if the layer is already stale.

@@ -192,8 +192,6 @@ func (s *stateSet) check() uint64 {
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
//
//nolint:unused
func (s *stateSet) accountList() []common.Hash {
// If an old list already exists, return it
s.lock.RLock()
@@ -229,8 +227,6 @@ func (s *stateSet) accountList() []common.Hash {
// not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
//
//nolint:unused
func (s *stateSet) storageList(accountHash common.Hash) ([]common.Hash, bool) {
s.lock.RLock()
_, destructed := s.destructSet[accountHash]
