diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index cfbdb01c49..69076bca16 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -555,3 +555,21 @@ func (db *Database) StorageHistory(address common.Address, slot common.Hash, sta
func (db *Database) HistoryRange() (uint64, uint64, error) {
return historyRange(db.freezer)
}
+
+// AccountIterator creates a new account iterator for the specified root hash and
+// seeks to a starting account hash.
+func (db *Database) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
+ //if gen := db.tree.bottom().generator; gen != nil && !gen.completed() {
+ // return nil, errNotConstructed
+ //}
+ return newFastAccountIterator(db, root, seek)
+}
+
+// StorageIterator creates a new storage iterator for the specified root hash and
+// account. The iterator will be moved to the specific start position.
+func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+ //if gen := db.tree.bottom().generator; gen != nil && !gen.completed() {
+ // return nil, errNotConstructed
+ //}
+ return newFastStorageIterator(db, root, account, seek)
+}
diff --git a/triedb/pathdb/holdable_iterator.go b/triedb/pathdb/holdable_iterator.go
new file mode 100644
index 0000000000..4b852e65ab
--- /dev/null
+++ b/triedb/pathdb/holdable_iterator.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// holdableIterator is a wrapper of underlying database iterator. It extends
+// the basic iterator interface by adding Hold which can hold the element
+// locally where the iterator is currently located and serve it up next time.
+type holdableIterator struct {
+ it ethdb.Iterator
+ key []byte
+ val []byte
+ atHeld bool
+}
+
+// newHoldableIterator initializes the holdableIterator with the given iterator.
+func newHoldableIterator(it ethdb.Iterator) *holdableIterator {
+ return &holdableIterator{it: it}
+}
+
+// Hold holds the element locally where the iterator is currently located which
+// can be served up next time.
+func (it *holdableIterator) Hold() {
+ if it.it.Key() == nil {
+ return // nothing to hold
+ }
+ it.key = common.CopyBytes(it.it.Key())
+ it.val = common.CopyBytes(it.it.Value())
+ it.atHeld = false
+}
+
+// Next moves the iterator to the next key/value pair. It returns whether the
+// iterator is exhausted.
+func (it *holdableIterator) Next() bool {
+ if !it.atHeld && it.key != nil {
+ it.atHeld = true
+ } else if it.atHeld {
+ it.atHeld = false
+ it.key = nil
+ it.val = nil
+ }
+ if it.key != nil {
+ return true // shifted to locally held value
+ }
+ return it.it.Next()
+}
+
+// Error returns any accumulated error. Exhausting all the key/value pairs
+// is not considered to be an error.
+func (it *holdableIterator) Error() error { return it.it.Error() }
+
+// Release releases associated resources. Release should always succeed and can
+// be called multiple times without causing error.
+func (it *holdableIterator) Release() {
+ it.atHeld = false
+ it.key = nil
+ it.val = nil
+ it.it.Release()
+}
+
+// Key returns the key of the current key/value pair, or nil if done. The caller
+// should not modify the contents of the returned slice, and its contents may
+// change on the next call to Next.
+func (it *holdableIterator) Key() []byte {
+ if it.key != nil {
+ return it.key
+ }
+ return it.it.Key()
+}
+
+// Value returns the value of the current key/value pair, or nil if done. The
+// caller should not modify the contents of the returned slice, and its contents
+// may change on the next call to Next.
+func (it *holdableIterator) Value() []byte {
+ if it.val != nil {
+ return it.val
+ }
+ return it.it.Value()
+}
diff --git a/triedb/pathdb/holdable_iterator_test.go b/triedb/pathdb/holdable_iterator_test.go
new file mode 100644
index 0000000000..f26be96c50
--- /dev/null
+++ b/triedb/pathdb/holdable_iterator_test.go
@@ -0,0 +1,177 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+)
+
+func TestIteratorHold(t *testing.T) {
+ // Create the key-value data store
+ var (
+ content = map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}
+ order = []string{"k1", "k2", "k3"}
+ db = rawdb.NewMemoryDatabase()
+ )
+ for key, val := range content {
+ if err := db.Put([]byte(key), []byte(val)); err != nil {
+ t.Fatalf("failed to insert item %s:%s into database: %v", key, val, err)
+ }
+ }
+ // Iterate over the database with the given configs and verify the results
+ it, idx := newHoldableIterator(db.NewIterator(nil, nil)), 0
+
+ // Nothing should be affected for calling Hold on a non-initialized iterator
+ it.Hold()
+
+ for it.Next() {
+ if len(content) <= idx {
+ t.Errorf("more items than expected: checking idx=%d (key %q), expecting len=%d", idx, it.Key(), len(order))
+ break
+ }
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+ // Should be safe to call Hold multiple times
+ it.Hold()
+ it.Hold()
+
+ // Shift iterator to the held element
+ it.Next()
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+
+ // Hold/Next combo should work always
+ it.Hold()
+ it.Next()
+ if !bytes.Equal(it.Key(), []byte(order[idx])) {
+ t.Errorf("item %d: key mismatch: have %s, want %s", idx, string(it.Key()), order[idx])
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[idx]])) {
+ t.Errorf("item %d: value mismatch: have %s, want %s", idx, string(it.Value()), content[order[idx]])
+ }
+ idx++
+ }
+ if err := it.Error(); err != nil {
+ t.Errorf("iteration failed: %v", err)
+ }
+ if idx != len(order) {
+ t.Errorf("iteration terminated prematurely: have %d, want %d", idx, len(order))
+ }
+ db.Close()
+}
+
+func TestReopenIterator(t *testing.T) {
+ var (
+ content = map[common.Hash]string{
+ common.HexToHash("a1"): "v1",
+ common.HexToHash("a2"): "v2",
+ common.HexToHash("a3"): "v3",
+ common.HexToHash("a4"): "v4",
+ common.HexToHash("a5"): "v5",
+ common.HexToHash("a6"): "v6",
+ }
+ order = []common.Hash{
+ common.HexToHash("a1"),
+ common.HexToHash("a2"),
+ common.HexToHash("a3"),
+ common.HexToHash("a4"),
+ common.HexToHash("a5"),
+ common.HexToHash("a6"),
+ }
+ db = rawdb.NewMemoryDatabase()
+
+ reopen = func(db ethdb.KeyValueStore, iter *holdableIterator) *holdableIterator {
+ if !iter.Next() {
+ iter.Release()
+ return newHoldableIterator(memorydb.New().NewIterator(nil, nil))
+ }
+ next := iter.Key()
+ iter.Release()
+ return newHoldableIterator(db.NewIterator(rawdb.SnapshotAccountPrefix, next[1:]))
+ }
+ _ = reopen
+ )
+ for key, val := range content {
+ rawdb.WriteAccountSnapshot(db, key, []byte(val))
+ }
+ checkVal := func(it *holdableIterator, index int) {
+ if !bytes.Equal(it.Key(), append(rawdb.SnapshotAccountPrefix, order[index].Bytes()...)) {
+ t.Fatalf("Unexpected data entry key, want %v got %v", order[index], it.Key())
+ }
+ if !bytes.Equal(it.Value(), []byte(content[order[index]])) {
+ t.Fatalf("Unexpected data entry key, want %v got %v", []byte(content[order[index]]), it.Value())
+ }
+ }
+ // Iterate over the database with the given configs and verify the results
+ dbIter := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
+ iter, idx := newHoldableIterator(rawdb.NewKeyLengthIterator(dbIter, 1+common.HashLength)), -1
+
+ idx++
+ iter.Next()
+ checkVal(iter, idx)
+
+ iter = reopen(db, iter)
+ idx++
+ iter.Next()
+ checkVal(iter, idx)
+
+ // reopen twice
+ iter = reopen(db, iter)
+ iter = reopen(db, iter)
+ idx++
+ iter.Next()
+ checkVal(iter, idx)
+
+ // reopen iterator with held value
+ iter.Next()
+ iter.Hold()
+ iter = reopen(db, iter)
+ idx++
+ iter.Next()
+ checkVal(iter, idx)
+
+ // reopen twice iterator with held value
+ iter.Next()
+ iter.Hold()
+ iter = reopen(db, iter)
+ iter = reopen(db, iter)
+ idx++
+ iter.Next()
+ checkVal(iter, idx)
+
+ // shift to the end and reopen
+ iter.Next() // the end
+ iter = reopen(db, iter)
+ iter.Next()
+ if iter.Key() != nil {
+ t.Fatal("Unexpected iterated entry")
+ }
+}
diff --git a/triedb/pathdb/iterator.go b/triedb/pathdb/iterator.go
new file mode 100644
index 0000000000..b807702331
--- /dev/null
+++ b/triedb/pathdb/iterator.go
@@ -0,0 +1,407 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
+// Iterator is an iterator to step over all the accounts or the specific
+// storage in a snapshot which may or may not be composed of multiple layers.
+type Iterator interface {
+ // Next steps the iterator forward one element, returning false if exhausted,
+ // or an error if iteration failed for some reason (e.g. root being iterated
+ // becomes stale and garbage collected).
+ Next() bool
+
+ // Error returns any failure that occurred during iteration, which might have
+ // caused a premature iteration exit (e.g. snapshot stack becoming stale).
+ Error() error
+
+ // Hash returns the hash of the account or storage slot the iterator is
+ // currently at.
+ Hash() common.Hash
+
+ // Release releases associated resources. Release should always succeed and
+ // can be called multiple times without causing error.
+ Release()
+}
+
+// AccountIterator is an iterator to step over all the accounts in a snapshot,
+// which may or may not be composed of multiple layers.
+type AccountIterator interface {
+ Iterator
+
+ // Account returns the RLP encoded slim account the iterator is currently at.
+ // An error will be returned if the iterator becomes invalid
+ Account() []byte
+}
+
+// StorageIterator is an iterator to step over the specific storage in a snapshot,
+// which may or may not be composed of multiple layers.
+type StorageIterator interface {
+ Iterator
+
+ // Slot returns the storage slot the iterator is currently at. An error will
+ // be returned if the iterator becomes invalid
+ Slot() []byte
+}
+
+// diffAccountIterator is an account iterator that steps over the accounts (both
+// live and deleted) contained within a state set. Higher order iterators will
+// use the deleted accounts to skip deeper iterators.
+//
+// This iterator could be created from the diff layer or the disk layer (the
+// aggregated state buffer).
+type diffAccountIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced state set might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+
+ states *stateSet // Live state set to retrieve accounts from
+ stale func() bool // Signal if the referenced state set is stale
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// newDiffAccountIterator creates an account iterator over the given state set.
+func newDiffAccountIterator(seek common.Hash, states *stateSet, stale func() bool) AccountIterator {
+ // Seek out the requested starting account
+ hashes := states.accountList()
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+ // Assemble and return the already seeked iterator
+ return &diffAccountIterator{
+ states: states,
+ stale: stale,
+ keys: hashes[index:],
+ }
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffAccountIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+ // somebody forgot to check for Error, so lets blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.stale != nil && it.stale() {
+ it.fail, it.keys = errSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffAccountIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *diffAccountIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at.
+// This method may _fail_, if the associated state goes stale between the call
+// to Next and Account. That type of error will set it.fail.
+//
+// This method assumes that states modification does not delete elements from
+// the account mapping (writing nil into it is fine though), and will panic
+// if elements have been deleted.
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *diffAccountIterator) Account() []byte {
+ blob, ok := it.states.account(it.curHash)
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent account: %x", it.curHash))
+ }
+ if it.stale != nil && it.stale() {
+ it.fail, it.keys = errSnapshotStale, nil
+ }
+ return blob
+}
+
+// Release is a noop for diff account iterators as there are no held resources.
+func (it *diffAccountIterator) Release() {}
+
+// diskAccountIterator is an account iterator that steps over the persistent
+// accounts within the database.
+//
+// To simplify, the staleness of the persistent state is not tracked. The disk
+// iterator is not intended to be used alone. It should always be wrapped with
+// a diff iterator, as the bottom-most disk layer uses both the in-memory
+// aggregated buffer and the persistent disk layer as data sources. The staleness
+// of the diff iterator is sufficient to invalidate the iterator pair.
+type diskAccountIterator struct {
+ it ethdb.Iterator
+}
+
+// newDiskAccountIterator creates an account iterator over the persistent state.
+func newDiskAccountIterator(db ethdb.KeyValueStore, seek common.Hash) AccountIterator {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskAccountIterator{
+ it: db.NewIterator(rawdb.SnapshotAccountPrefix, pos),
+ }
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskAccountIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotAccountPrefix)+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// Any failure of the underlying database iterator is surfaced here; nil is
+// returned once the iterator has been exhausted and released.
+func (it *diskAccountIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *diskAccountIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at.
+func (it *diskAccountIterator) Account() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskAccountIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
+
+// diffStorageIterator is a storage iterator that steps over the specific storage
+// (both live and deleted) contained within a state set. Higher order iterators
+// will use the deleted slot to skip deeper iterators.
+//
+// This iterator could be created from the diff layer or the disk layer (the
+// aggregated state buffer).
+type diffStorageIterator struct {
+ // curHash is the current hash the iterator is positioned on. The field is
+ // explicitly tracked since the referenced state set might go stale after
+ // the iterator was positioned and we don't want to fail accessing the old
+ // hash as long as the iterator is not touched any more.
+ curHash common.Hash
+ account common.Hash
+
+ states *stateSet // Live state set to retrieve storage slots from
+ stale func() bool // Signal if the referenced state set is stale
+ keys []common.Hash // Keys left in the layer to iterate
+ fail error // Any failures encountered (stale)
+}
+
+// newDiffStorageIterator creates a storage iterator over a single diff layer.
+// Besides the storage iterator, an additional boolean flag "destructed" is
+// returned. If it is true, the whole storage was destructed in this layer
+// (and maybe recreated too), so there is no need to descend into deeper
+// layers for storage retrieval.
+func newDiffStorageIterator(account common.Hash, seek common.Hash, states *stateSet, stale func() bool) (StorageIterator, bool) {
+ // Create the storage for this account even if it's marked
+ // as destructed. The iterator is for the new one which
+ // just has the same address as the deleted one.
+ hashes, destructed := states.storageList(account)
+ index := sort.Search(len(hashes), func(i int) bool {
+ return bytes.Compare(seek[:], hashes[i][:]) <= 0
+ })
+ // Assemble and return the already seeked iterator
+ return &diffStorageIterator{
+ states: states,
+ stale: stale,
+ account: account,
+ keys: hashes[index:],
+ }, destructed
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diffStorageIterator) Next() bool {
+ // If the iterator was already stale, consider it a programmer error. Although
+ // we could just return false here, triggering this path would probably mean
+ // somebody forgot to check for Error, so lets blow up instead of undefined
+ // behavior that's hard to debug.
+ if it.fail != nil {
+ panic(fmt.Sprintf("called Next of failed iterator: %v", it.fail))
+ }
+ // Stop iterating if all keys were exhausted
+ if len(it.keys) == 0 {
+ return false
+ }
+ if it.stale != nil && it.stale() {
+ it.fail, it.keys = errSnapshotStale, nil
+ return false
+ }
+ // Iterator seems to be still alive, retrieve and cache the live hash
+ it.curHash = it.keys[0]
+
+ // key cached, shift the iterator and notify the user of success
+ it.keys = it.keys[1:]
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *diffStorageIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diffStorageIterator) Hash() common.Hash {
+ return it.curHash
+}
+
+// Slot returns the raw storage slot value the iterator is currently at.
+// This method may _fail_, if the associated state sets is modified between
+// the call to Next and Value. That type of error will set it.fail.
+// This method assumes that state set modification does not delete elements
+// from the storage mapping (writing nil into it is fine though), and will
+// panic if elements have been deleted.
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *diffStorageIterator) Slot() []byte {
+ storage, ok := it.states.storage(it.account, it.curHash)
+ if !ok {
+ panic(fmt.Sprintf("iterator referenced non-existent storage: %x %x", it.account, it.curHash))
+ }
+ if it.stale != nil && it.stale() {
+ it.fail, it.keys = errSnapshotStale, nil
+ }
+ return storage
+}
+
+// Release is a noop for diff account iterators as there are no held resources.
+func (it *diffStorageIterator) Release() {}
+
+// diskStorageIterator is a storage iterator that steps over the persistent
+// storage slots contained within the database.
+//
+// To simplify, the staleness of the persistent state is not tracked. The disk
+// iterator is not intended to be used alone. It should always be wrapped with
+// a diff iterator, as the bottom-most disk layer uses both the in-memory
+// aggregated buffer and the persistent disk layer as data sources. The staleness
+// of the diff iterator is sufficient to invalidate the iterator pair.
+type diskStorageIterator struct {
+ account common.Hash
+ it ethdb.Iterator
+}
+
+// newDiskStorageIterator creates a storage iterator over the persistent state.
+// If the whole storage is destructed, then all entries in the disk
+// layer are deleted already. So the "destructed" flag returned here
+// is always false.
+func newDiskStorageIterator(db ethdb.KeyValueStore, account common.Hash, seek common.Hash) StorageIterator {
+ pos := common.TrimRightZeroes(seek[:])
+ return &diskStorageIterator{
+ account: account,
+ it: db.NewIterator(append(rawdb.SnapshotStoragePrefix, account.Bytes()...), pos),
+ }
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+func (it *diskStorageIterator) Next() bool {
+ // If the iterator was already exhausted, don't bother
+ if it.it == nil {
+ return false
+ }
+ // Try to advance the iterator and release it if we reached the end
+ for {
+ if !it.it.Next() {
+ it.it.Release()
+ it.it = nil
+ return false
+ }
+ if len(it.it.Key()) == len(rawdb.SnapshotStoragePrefix)+common.HashLength+common.HashLength {
+ break
+ }
+ }
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+//
+// Any failure of the underlying database iterator is surfaced here; nil is
+// returned once the iterator has been exhausted and released.
+func (it *diskStorageIterator) Error() error {
+ if it.it == nil {
+ return nil // Iterator is exhausted and released
+ }
+ return it.it.Error()
+}
+
+// Hash returns the hash of the storage slot the iterator is currently at.
+func (it *diskStorageIterator) Hash() common.Hash {
+ return common.BytesToHash(it.it.Key()) // The prefix will be truncated
+}
+
+// Slot returns the raw storage slot content the iterator is currently at.
+func (it *diskStorageIterator) Slot() []byte {
+ return it.it.Value()
+}
+
+// Release releases the database snapshot held during iteration.
+func (it *diskStorageIterator) Release() {
+ // The iterator is auto-released on exhaustion, so make sure it's still alive
+ if it.it != nil {
+ it.it.Release()
+ it.it = nil
+ }
+}
diff --git a/triedb/pathdb/iterator_binary.go b/triedb/pathdb/iterator_binary.go
new file mode 100644
index 0000000000..5f6cf3b718
--- /dev/null
+++ b/triedb/pathdb/iterator_binary.go
@@ -0,0 +1,290 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// binaryIterator is a simplistic iterator to step over the accounts or storage
+// in a snapshot, which may or may not be composed of multiple layers. Performance
+// wise this iterator is slow, it's meant for cross validating the fast one.
+type binaryIterator struct {
+ a Iterator
+ b Iterator
+ aDone bool
+ bDone bool
+ k common.Hash
+ account common.Hash
+ fail error
+}
+
+// initBinaryAccountIterator creates a simplistic iterator to step over all the
+// accounts in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryAccountIterator` as the API.
+func (dl *diskLayer) initBinaryAccountIterator() *binaryIterator {
+ // Create two iterators for state buffer and the persistent state in disk
+ // respectively and combine them as a binary iterator.
+ l := &binaryIterator{
+ a: newDiffAccountIterator(common.Hash{}, dl.buffer.states, dl.isStale),
+ b: newDiskAccountIterator(dl.db.diskdb, common.Hash{}),
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryAccountIterator creates a simplistic iterator to step over all the
+// accounts in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryAccountIterator` as the API.
+func (dl *diffLayer) initBinaryAccountIterator() *binaryIterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ l := &binaryIterator{
+ a: newDiffAccountIterator(common.Hash{}, dl.states.stateSet, nil),
+ b: dl.parent.(*diskLayer).initBinaryAccountIterator(),
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ l := &binaryIterator{
+ a: newDiffAccountIterator(common.Hash{}, dl.states.stateSet, nil),
+ b: parent.initBinaryAccountIterator(),
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryStorageIterator creates a simplistic iterator to step over all the
+// storage slots in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryStorageIterator` as the API.
+func (dl *diskLayer) initBinaryStorageIterator(account common.Hash) *binaryIterator {
+ // Create two iterators for state buffer and the persistent state in disk
+ // respectively and combine them as a binary iterator.
+ //
+ // Mark the iterator b as exhausted if the corresponding account is destructed
+ // in the state buffer.
+ a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.buffer.states, dl.isStale)
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: newDiskStorageIterator(dl.db.diskdb, account, common.Hash{}),
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// initBinaryStorageIterator creates a simplistic iterator to step over all the
+// storage slots in a slow, but easily verifiable way. Note this function is used
+// for initialization, use `newBinaryStorageIterator` as the API.
+func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) *binaryIterator {
+ parent, ok := dl.parent.(*diffLayer)
+ if !ok {
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ //
+ // Diff layer has no stale notion, the callback for checking staleness
+ // is nil here.
+ a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.states.stateSet, nil)
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: dl.parent.(*diskLayer).initBinaryStorageIterator(account),
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+ }
+ // If the storage in this layer is already destructed, discard all
+ // deeper layers but still return a valid single-branch iterator.
+ //
+ // Diff layer has no stale notion, the callback for checking staleness
+ // is nil here.
+ a, destructed := newDiffStorageIterator(account, common.Hash{}, dl.states.stateSet, nil)
+ if destructed {
+ l := &binaryIterator{
+ a: a,
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = true
+ return l
+ }
+ l := &binaryIterator{
+ a: a,
+ b: parent.initBinaryStorageIterator(account),
+ account: account,
+ }
+ l.aDone = !l.a.Next()
+ l.bDone = !l.b.Next()
+ return l
+}
+
+// Next steps the iterator forward one element, returning false if exhausted,
+// or an error if iteration failed for some reason (e.g. root being iterated
+// becomes stale and garbage collected).
+func (it *binaryIterator) Next() bool {
+ if it.aDone && it.bDone {
+ return false
+ }
+first:
+ if it.aDone {
+ it.k = it.b.Hash()
+ it.bDone = !it.b.Next()
+ return true
+ }
+ if it.bDone {
+ it.k = it.a.Hash()
+ it.aDone = !it.a.Next()
+ return true
+ }
+ nextA, nextB := it.a.Hash(), it.b.Hash()
+ if diff := bytes.Compare(nextA[:], nextB[:]); diff < 0 {
+ it.aDone = !it.a.Next()
+ it.k = nextA
+ return true
+ } else if diff == 0 {
+ // Now we need to advance one of them
+ it.aDone = !it.a.Next()
+ goto first
+ }
+ it.bDone = !it.b.Next()
+ it.k = nextB
+ return true
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (it *binaryIterator) Error() error {
+ return it.fail
+}
+
+// Hash returns the hash of the account the iterator is currently at.
+func (it *binaryIterator) Hash() common.Hash {
+ return it.k
+}
+
+// Release recursively releases all the iterators in the stack.
+func (it *binaryIterator) Release() {
+ it.a.Release()
+ it.b.Release()
+}
+
+// accountBinaryIterator is a wrapper around a binary iterator that adds functionality
+// to retrieve account data from the associated layer at the current position.
+type accountBinaryIterator struct {
+ *binaryIterator
+ layer layer
+}
+
+// newBinaryAccountIterator creates a simplistic account iterator to step over
+// all the accounts in a slow, but easily verifiable way.
+//
+//nolint:all
+func (dl *diskLayer) newBinaryAccountIterator() AccountIterator {
+ return &accountBinaryIterator{
+ binaryIterator: dl.initBinaryAccountIterator(),
+ layer: dl,
+ }
+}
+
+// newBinaryAccountIterator creates a simplistic account iterator to step over
+// all the accounts in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryAccountIterator() AccountIterator {
+ return &accountBinaryIterator{
+ binaryIterator: dl.initBinaryAccountIterator(),
+ layer: dl,
+ }
+}
+
+// Account returns the RLP encoded slim account the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned account is not a copy, please don't modify it.
+func (it *accountBinaryIterator) Account() []byte {
+ blob, err := it.layer.account(it.k, 0)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
+
+// storageBinaryIterator is a wrapper around a binary iterator that adds functionality
+// to retrieve storage slot data from the associated layer at the current position.
+type storageBinaryIterator struct {
+ *binaryIterator
+ layer layer
+}
+
+// newBinaryStorageIterator creates a simplistic storage iterator to step over
+// all the storage slots in a slow, but easily verifiable way.
+//
+//nolint:all
+func (dl *diskLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
+ return &storageBinaryIterator{
+ binaryIterator: dl.initBinaryStorageIterator(account),
+ layer: dl,
+ }
+}
+
+// newBinaryStorageIterator creates a simplistic storage iterator to step over
+// all the storage slots in a slow, but easily verifiable way.
+func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator {
+ return &storageBinaryIterator{
+ binaryIterator: dl.initBinaryStorageIterator(account),
+ layer: dl,
+ }
+}
+
+// Slot returns the raw storage slot data the iterator is currently at, or
+// nil if the iterated snapshot stack became stale (you can check Error after
+// to see if it failed or not).
+//
+// Note the returned slot is not a copy, please don't modify it.
+func (it *storageBinaryIterator) Slot() []byte {
+ blob, err := it.layer.storage(it.account, it.k, 0)
+ if err != nil {
+ it.fail = err
+ return nil
+ }
+ return blob
+}
diff --git a/triedb/pathdb/iterator_fast.go b/triedb/pathdb/iterator_fast.go
new file mode 100644
index 0000000000..0843fe2b43
--- /dev/null
+++ b/triedb/pathdb/iterator_fast.go
@@ -0,0 +1,372 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "fmt"
+ "slices"
+ "sort"
+
+ "github.com/ethereum/go-ethereum/common"
+)
+
+// weightedIterator is an iterator with an assigned weight. It is used to prioritise
+// which account or storage slot is the correct one if multiple iterators find the
+// same one (modified in multiple consecutive blocks).
+type weightedIterator struct {
+	it       Iterator // underlying single-layer iterator
+	priority int      // layer depth; lower value means more recent data and wins ties
+}
+
+// Cmp orders two weighted iterators: primarily by the hash each underlying
+// iterator is currently positioned at, and on a tie by priority, where the
+// lower (more recent layer) value sorts first.
+func (it *weightedIterator) Cmp(other *weightedIterator) int {
+	a, b := it.it.Hash(), other.it.Hash()
+	if c := bytes.Compare(a[:], b[:]); c != 0 {
+		return c
+	}
+	// Same account/storage-slot in multiple layers, split by priority
+	switch {
+	case it.priority < other.priority:
+		return -1
+	case it.priority > other.priority:
+		return 1
+	default:
+		return 0
+	}
+}
+
+// fastIterator is a more optimized multi-layer iterator which maintains a
+// direct mapping of all iterators leading down to the bottom layer.
+type fastIterator struct {
+	db   *Database   // Database to reinitialize stale sub-iterators with
+	root common.Hash // Root hash to reinitialize stale sub-iterators through
+
+	curAccount []byte // cached account blob at the current position (account mode)
+	curSlot    []byte // cached slot value at the current position (storage mode)
+
+	iterators []*weightedIterator // live per-layer iterators, kept sorted by hash, then priority
+	initiated bool                // whether the initial positioning from init() has been consumed
+	account   bool                // true for account iteration, false for storage slots
+	fail      error               // any sub-iterator failure, surfaced via Error()
+}
+
+// newFastIterator creates a new hierarchical account or storage iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously.
+//
+// When accountIterator is true, accounts are iterated; otherwise the storage
+// slots of the given account are. All sub-iterators start at the first hash
+// at or after seek. The layer stack is walked from the requested root down to
+// the disk layer; a smaller depth (priority) marks more recent data.
+func newFastIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) {
+	current := db.tree.get(root)
+	if current == nil {
+		return nil, fmt.Errorf("unknown snapshot: %x", root)
+	}
+	fi := &fastIterator{
+		db:      db,
+		root:    root,
+		account: accountIterator,
+	}
+loop:
+	for depth := 0; current != nil; depth++ {
+		if accountIterator {
+			switch dl := current.(type) {
+			case *diskLayer:
+				// The disk layer contributes two sources: the in-memory
+				// dirty buffer (more recent) and the on-disk data.
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       newDiffAccountIterator(seek, dl.buffer.states, dl.isStale),
+					priority: depth,
+				})
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       newDiskAccountIterator(dl.db.diskdb, seek),
+					priority: depth + 1,
+				})
+			case *diffLayer:
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       newDiffAccountIterator(seek, dl.states.stateSet, nil),
+					priority: depth,
+				})
+			}
+		} else {
+			// If the whole storage is destructed in this layer, don't
+			// bother deeper layer anymore. But we should still keep
+			// the iterator for this layer, since the iterator can contain
+			// some valid slots which belongs to the re-created account.
+			switch dl := current.(type) {
+			case *diskLayer:
+				it, destructed := newDiffStorageIterator(account, seek, dl.buffer.states, dl.isStale)
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       it,
+					priority: depth,
+				})
+				if destructed {
+					break loop
+				}
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       newDiskStorageIterator(dl.db.diskdb, account, seek),
+					priority: depth + 1,
+				})
+			case *diffLayer:
+				it, destructed := newDiffStorageIterator(account, seek, dl.states.stateSet, nil)
+				fi.iterators = append(fi.iterators, &weightedIterator{
+					it:       it,
+					priority: depth,
+				})
+				if destructed {
+					break loop
+				}
+			}
+		}
+		current = current.parentLayer()
+	}
+	fi.init()
+	return fi, nil
+}
+
+// init walks over all the iterators and resolves any clashes between them, after
+// which it prepares the stack for step-by-step iteration.
+//
+// After init returns, every remaining sub-iterator is positioned on a distinct
+// hash and the list is sorted by (hash, priority); exhausted iterators have
+// been released and dropped.
+func (fi *fastIterator) init() {
+	// Track which account hashes are iterators positioned on
+	var positioned = make(map[common.Hash]int)
+
+	// Position all iterators and track how many remain live
+	for i := 0; i < len(fi.iterators); i++ {
+		// Retrieve the first element and if it clashes with a previous iterator,
+		// advance either the current one or the old one. Repeat until nothing is
+		// clashing any more.
+		it := fi.iterators[i]
+		for {
+			// If the iterator is exhausted, drop it off the end
+			if !it.it.Next() {
+				it.it.Release()
+				last := len(fi.iterators) - 1
+
+				fi.iterators[i] = fi.iterators[last]
+				fi.iterators[last] = nil
+				fi.iterators = fi.iterators[:last]
+
+				// Compensate the outer loop counter for the element swapped in
+				i--
+				break
+			}
+			// The iterator is still alive, check for collisions with previous ones
+			hash := it.it.Hash()
+			if other, exist := positioned[hash]; !exist {
+				positioned[hash] = i
+				break
+			} else {
+				// Iterators collide, one needs to be progressed, use priority to
+				// determine which.
+				//
+				// This whole else-block can be avoided, if we instead
+				// do an initial priority-sort of the iterators. If we do that,
+				// then we'll only wind up here if a lower-priority (preferred) iterator
+				// has the same value, and then we will always just continue.
+				// However, it costs an extra sort, so it's probably not better
+				if fi.iterators[other].priority < it.priority {
+					// The 'it' should be progressed
+					continue
+				} else {
+					// The 'other' should be progressed, swap them
+					it = fi.iterators[other]
+					fi.iterators[other], fi.iterators[i] = fi.iterators[i], fi.iterators[other]
+					continue
+				}
+			}
+		}
+	}
+	// Re-sort the entire list
+	slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) })
+	fi.initiated = false
+}
+
+// Next steps the iterator forward one element, returning false if exhausted.
+//
+// On success, the current account blob (account mode) or slot value (storage
+// mode) is cached for retrieval via Account()/Slot().
+func (fi *fastIterator) Next() bool {
+	if len(fi.iterators) == 0 {
+		return false
+	}
+	if !fi.initiated {
+		// Don't forward first time -- we had to 'Next' once in order to
+		// do the sorting already
+		fi.initiated = true
+		if fi.account {
+			fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+		} else {
+			fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+		}
+		if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+			fi.fail = innerErr
+			return false
+		}
+		if fi.curAccount != nil || fi.curSlot != nil {
+			return true
+		}
+		// Implicit else: we've hit a nil-account or nil-slot, and need to
+		// fall through to the loop below to land on something non-nil
+	}
+	// If an account or a slot is deleted in one of the layers, the key will
+	// still be there, but the actual value will be nil. However, the iterator
+	// should not export nil-values (but instead simply omit the key), so we
+	// need to loop here until we either
+	//  - get a non-nil value,
+	//  - hit an error,
+	//  - or exhaust the iterator
+	for {
+		if !fi.next(0) {
+			return false // exhausted
+		}
+		if fi.account {
+			fi.curAccount = fi.iterators[0].it.(AccountIterator).Account()
+		} else {
+			fi.curSlot = fi.iterators[0].it.(StorageIterator).Slot()
+		}
+		if innerErr := fi.iterators[0].it.Error(); innerErr != nil {
+			fi.fail = innerErr
+			return false // error
+		}
+		if fi.curAccount != nil || fi.curSlot != nil {
+			break // non-nil value found
+		}
+	}
+	return true
+}
+
+// next handles the next operation internally and should be invoked when we know
+// that two elements in the list may have the same value.
+//
+// For example, if the iterated hashes become [2,3,5,5,8,9,10], then we should
+// invoke next(3), which will call Next on elem 3 (the second '5') and will
+// cascade along the list, applying the same operation if needed.
+//
+// It returns false only when the whole iterator set has been exhausted.
+func (fi *fastIterator) next(idx int) bool {
+	// If this particular iterator got exhausted, remove it and return true (the
+	// next one is surely not exhausted yet, otherwise it would have been removed
+	// already).
+	if it := fi.iterators[idx].it; !it.Next() {
+		it.Release()
+
+		fi.iterators = append(fi.iterators[:idx], fi.iterators[idx+1:]...)
+		return len(fi.iterators) > 0
+	}
+	// If there's no one left to cascade into, return
+	if idx == len(fi.iterators)-1 {
+		return true
+	}
+	// We next-ed the iterator at 'idx', now we may have to re-sort that element
+	var (
+		cur, next         = fi.iterators[idx], fi.iterators[idx+1]
+		curHash, nextHash = cur.it.Hash(), next.it.Hash()
+	)
+	if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
+		// It is still in correct place
+		return true
+	} else if diff == 0 && cur.priority < next.priority {
+		// So still in correct place, but we need to iterate on the next
+		fi.next(idx + 1)
+		return true
+	}
+	// At this point, the iterator is in the wrong location, but the remaining
+	// list is sorted. Find out where to move the item.
+	clash := -1
+	index := sort.Search(len(fi.iterators), func(n int) bool {
+		// The iterator always advances forward, so anything before the old slot
+		// is known to be behind us, so just skip them altogether. This actually
+		// is an important clause since the sort order got invalidated.
+		if n < idx {
+			return false
+		}
+		if n == len(fi.iterators)-1 {
+			// Can always place an elem last
+			return true
+		}
+		nextHash := fi.iterators[n+1].it.Hash()
+		if diff := bytes.Compare(curHash[:], nextHash[:]); diff < 0 {
+			return true
+		} else if diff > 0 {
+			return false
+		}
+		// The elem we're placing it next to has the same value,
+		// so whichever winds up on n+1 will need further iteration
+		clash = n + 1
+
+		return cur.priority < fi.iterators[n+1].priority
+	})
+	fi.move(idx, index)
+	// If the relocation landed next to an equal hash, cascade the advance
+	if clash != -1 {
+		fi.next(clash)
+	}
+	return true
+}
+
+// move relocates the iterator at position index to newpos, shifting the
+// elements in between one step towards the front. Callers guarantee that
+// newpos >= index.
+func (fi *fastIterator) move(index, newpos int) {
+	moved := fi.iterators[index]
+	for i := index; i < newpos; i++ {
+		fi.iterators[i] = fi.iterators[i+1]
+	}
+	fi.iterators[newpos] = moved
+}
+
+// Error returns any failure that occurred during iteration, which might have
+// caused a premature iteration exit (e.g. snapshot stack becoming stale).
+func (fi *fastIterator) Error() error {
+	return fi.fail
+}
+
+// Hash returns the current key, i.e. the hash the head (lowest sorted)
+// sub-iterator is positioned at.
+//
+// NOTE(review): assumes at least one live sub-iterator remains; calling this
+// after exhaustion would panic — confirm callers only use it between
+// successful Next() calls.
+func (fi *fastIterator) Hash() common.Hash {
+	return fi.iterators[0].it.Hash()
+}
+
+// Account returns the current account blob, cached by the last Next() call.
+// Note the returned account is not a copy, please don't modify it.
+func (fi *fastIterator) Account() []byte {
+	return fi.curAccount
+}
+
+// Slot returns the current storage slot, cached by the last Next() call.
+// Note the returned slot is not a copy, please don't modify it.
+func (fi *fastIterator) Slot() []byte {
+	return fi.curSlot
+}
+
+// Release iterates over all the remaining live layer iterators and releases each
+// of them individually, then drops the whole list.
+func (fi *fastIterator) Release() {
+	for i := range fi.iterators {
+		fi.iterators[i].it.Release()
+	}
+	fi.iterators = nil
+}
+
+// Debug is a convenience helper during testing: it prints each sub-iterator's
+// priority and the first byte of its current hash.
+func (fi *fastIterator) Debug() {
+	for _, w := range fi.iterators {
+		fmt.Printf("[p=%v v=%v] ", w.priority, w.it.Hash()[0])
+	}
+	fmt.Println()
+}
+
+// newFastAccountIterator creates a new hierarchical account iterator with one
+// element per diff layer. The returned combo iterator can be used to walk over
+// the entire snapshot diff stack simultaneously, starting at seek.
+func newFastAccountIterator(db *Database, root common.Hash, seek common.Hash) (AccountIterator, error) {
+	return newFastIterator(db, root, common.Hash{}, seek, true)
+}
+
+// newFastStorageIterator creates a new hierarchical storage iterator for the
+// given account, with one element per diff layer. The returned combo iterator
+// can be used to walk over the entire snapshot diff stack simultaneously,
+// starting at seek.
+func newFastStorageIterator(db *Database, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
+	return newFastIterator(db, root, account, seek, false)
+}
diff --git a/triedb/pathdb/iterator_test.go b/triedb/pathdb/iterator_test.go
new file mode 100644
index 0000000000..eccee65623
--- /dev/null
+++ b/triedb/pathdb/iterator_test.go
@@ -0,0 +1,1047 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package pathdb
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/testrand"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/trie/trienode"
+ "github.com/holiman/uint256"
+)
+
+// verifyContent selects which payload verifyIterator checks for every key it
+// visits.
+type verifyContent int
+
+const (
+	verifyNothing verifyContent = iota // only verify ordering and count; values may be nil
+	verifyAccount                      // additionally require a non-empty account blob
+	verifyStorage                      // additionally require a non-empty slot value
+)
+
+// verifyIterator drains the given iterator, asserting that exactly expCount
+// keys come out in strictly ascending hash order, optionally checking that
+// each visited account blob or storage slot is non-empty, and that no error
+// was accumulated.
+func verifyIterator(t *testing.T, expCount int, it Iterator, verify verifyContent) {
+	t.Helper()
+
+	var (
+		seen int
+		prev common.Hash
+	)
+	for it.Next() {
+		hash := it.Hash()
+		if bytes.Compare(prev[:], hash[:]) >= 0 {
+			t.Errorf("wrong order: %x >= %x", prev, hash)
+		}
+		seen++
+		switch verify {
+		case verifyAccount:
+			if len(it.(AccountIterator).Account()) == 0 {
+				t.Errorf("iterator returned nil-value for hash %x", hash)
+			}
+		case verifyStorage:
+			if len(it.(StorageIterator).Slot()) == 0 {
+				t.Errorf("iterator returned nil-value for hash %x", hash)
+			}
+		}
+		prev = hash
+	}
+	if seen != expCount {
+		t.Errorf("iterator count mismatch: have %d, want %d", seen, expCount)
+	}
+	if err := it.Error(); err != nil {
+		t.Errorf("iterator failed: %v", err)
+	}
+}
+
+// randomAccount generates a random account and returns it RLP encoded.
+// The code hash is fixed to the empty-code hash so only balance, nonce and
+// storage root vary.
+func randomAccount() []byte {
+	a := &types.StateAccount{
+		Balance:  uint256.NewInt(rand.Uint64()),
+		Nonce:    rand.Uint64(),
+		Root:     testrand.Hash(),
+		CodeHash: types.EmptyCodeHash[:],
+	}
+	data, _ := rlp.EncodeToBytes(a)
+	return data
+}
+
+// randomAccountSet generates a set of random accounts with the given strings as
+// the account address hashes.
+func randomAccountSet(hashes ...string) map[common.Hash][]byte {
+	set := make(map[common.Hash][]byte, len(hashes))
+	for _, h := range hashes {
+		set[common.HexToHash(h)] = randomAccount()
+	}
+	return set
+}
+
+// randomStorageSet generates a set of random slots with the given strings as
+// the slot addresses. For each account, hashes[i] lists the slots that get a
+// random 32-byte value, while nilStorage[i] lists slots marked as deleted
+// (nil value). Either list may be shorter than accounts.
+func randomStorageSet(accounts []string, hashes [][]string, nilStorage [][]string) map[common.Hash]map[common.Hash][]byte {
+	storages := make(map[common.Hash]map[common.Hash][]byte)
+	for i, account := range accounts {
+		slots := make(map[common.Hash][]byte)
+		storages[common.HexToHash(account)] = slots
+
+		if i < len(hashes) {
+			for _, hash := range hashes[i] {
+				slots[common.HexToHash(hash)] = testrand.Bytes(32)
+			}
+		}
+		if i < len(nilStorage) {
+			for _, hash := range nilStorage[i] {
+				slots[common.HexToHash(hash)] = nil
+			}
+		}
+	}
+	return storages
+}
+
+// TestAccountIteratorBasics tests some simple single-layer(diff and disk) iteration
+func TestAccountIteratorBasics(t *testing.T) {
+	var (
+		destructs = make(map[common.Hash]struct{})
+		accounts  = make(map[common.Hash][]byte)
+		storage   = make(map[common.Hash]map[common.Hash][]byte)
+	)
+	// Fill up a parent with 100 accounts; roughly a quarter are flagged as
+	// destructed and roughly half get one random storage slot.
+	for i := 0; i < 100; i++ {
+		hash := testrand.Hash()
+		data := testrand.Bytes(32)
+
+		accounts[hash] = data
+		if rand.Intn(4) == 0 {
+			destructs[hash] = struct{}{}
+		}
+		if rand.Intn(2) == 0 {
+			accStorage := make(map[common.Hash][]byte)
+			accStorage[testrand.Hash()] = testrand.Bytes(32)
+			storage[hash] = accStorage
+		}
+	}
+	states := newStates(destructs, accounts, storage)
+	it := newDiffAccountIterator(common.Hash{}, states, nil)
+	verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	//db := rawdb.NewMemoryDatabase()
+	//batch := db.NewBatch()
+	//states.write(db, batch, nil, nil)
+	//batch.Write()
+	//it = newDiskAccountIterator(db, common.Hash{})
+	//verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+}
+
+// TestStorageIteratorBasics tests some simple single-layer(diff and disk) iteration for storage
+func TestStorageIteratorBasics(t *testing.T) {
+	var (
+		nilStorage = make(map[common.Hash]int) // per-account count of deleted (nil) slots
+		accounts   = make(map[common.Hash][]byte)
+		storage    = make(map[common.Hash]map[common.Hash][]byte)
+	)
+	// Fill some random data: 10 accounts with 100 slots each, where roughly
+	// half the slots are deletions (nil values).
+	for i := 0; i < 10; i++ {
+		hash := testrand.Hash()
+		accounts[hash] = testrand.Bytes(32)
+
+		accStorage := make(map[common.Hash][]byte)
+
+		var nilstorage int
+		for i := 0; i < 100; i++ {
+			if rand.Intn(2) == 0 {
+				accStorage[testrand.Hash()] = testrand.Bytes(32)
+			} else {
+				accStorage[testrand.Hash()] = nil // delete slot
+				nilstorage += 1
+			}
+		}
+		storage[hash] = accStorage
+		nilStorage[hash] = nilstorage
+	}
+	states := newStates(nil, accounts, storage)
+	for account := range accounts {
+		it, _ := newDiffStorageIterator(account, common.Hash{}, states, nil)
+		verifyIterator(t, 100, it, verifyNothing) // Nil is allowed for single layer iterator
+	}
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	//db := rawdb.NewMemoryDatabase()
+	//batch := db.NewBatch()
+	//states.write(db, batch, nil, nil)
+	//batch.Write()
+	//for account := range accounts {
+	//	it := newDiskStorageIterator(db, account, common.Hash{})
+	//	verifyIterator(t, 100-nilStorage[account], it, verifyNothing) // Nil is allowed for single layer iterator
+	//}
+}
+
+// testIterator is a minimal in-memory Iterator over a fixed byte sequence,
+// used to exercise the fastIterator merge logic without real layers.
+//
+// The iterator is created "pre-positioned" on its first value: Hash reports
+// values[0], and Next discards the current head before reporting whether
+// anything remains.
+type testIterator struct {
+	values []byte
+}
+
+// newTestIterator wraps the given byte values into a testIterator.
+func newTestIterator(values ...byte) *testIterator {
+	return &testIterator{values}
+}
+
+// Seek is unused by these tests and deliberately unimplemented.
+func (ti *testIterator) Seek(common.Hash) {
+	panic("implement me")
+}
+
+// Next advances past the current value and reports whether one remains.
+func (ti *testIterator) Next() bool {
+	ti.values = ti.values[1:]
+	return len(ti.values) > 0
+}
+
+// Error always reports success for the in-memory iterator.
+func (ti *testIterator) Error() error {
+	return nil
+}
+
+// Hash returns the current value widened to a common.Hash (value in the
+// last byte).
+func (ti *testIterator) Hash() common.Hash {
+	return common.BytesToHash([]byte{ti.values[0]})
+}
+
+// Account returns nil; these tests only verify key ordering.
+func (ti *testIterator) Account() []byte {
+	return nil
+}
+
+// Slot returns nil; these tests only verify key ordering.
+func (ti *testIterator) Slot() []byte {
+	return nil
+}
+
+// Release is a no-op for the in-memory iterator.
+func (ti *testIterator) Release() {}
+
+// TestFastIteratorBasics feeds several overlapping sorted byte sequences into
+// a hand-constructed fastIterator and checks that the merged output is the
+// deduplicated union in ascending order.
+func TestFastIteratorBasics(t *testing.T) {
+	type testCase struct {
+		lists   [][]byte // per-sub-iterator sorted inputs
+		expKeys []byte   // expected merged, deduplicated output
+	}
+	for i, tc := range []testCase{
+		{lists: [][]byte{{0, 1, 8}, {1, 2, 8}, {2, 9}, {4},
+			{7, 14, 15}, {9, 13, 15, 16}},
+			expKeys: []byte{0, 1, 2, 4, 7, 8, 9, 13, 14, 15, 16}},
+		{lists: [][]byte{{0, 8}, {1, 2, 8}, {7, 14, 15}, {8, 9},
+			{9, 10}, {10, 13, 15, 16}},
+			expKeys: []byte{0, 1, 2, 7, 8, 9, 10, 13, 14, 15, 16}},
+	} {
+		var iterators []*weightedIterator
+		for i, data := range tc.lists {
+			it := newTestIterator(data...)
+			iterators = append(iterators, &weightedIterator{it, i})
+		}
+		// Construct the iterator directly (bypassing init), as the test
+		// iterators are already positioned on their first values.
+		fi := &fastIterator{
+			iterators: iterators,
+			initiated: false,
+		}
+		count := 0
+		for fi.Next() {
+			if got, exp := fi.Hash()[31], tc.expKeys[count]; exp != got {
+				t.Errorf("tc %d, [%d]: got %d exp %d", i, count, got, exp)
+			}
+			count++
+		}
+	}
+}
+
+// TestAccountIteratorTraversal tests some simple multi-layer iteration.
+func TestAccountIteratorTraversal(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Stack three diff layers on top with various overlaps
+	// (0xf0 appears in all three, 0xff in two — 7 distinct accounts total).
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+
+	// Verify the single and multi-layer iterators
+	head := db.tree.get(common.HexToHash("0x04"))
+
+	it := newDiffAccountIterator(common.Hash{}, head.(*diffLayer).states.stateSet, nil)
+	verifyIterator(t, 3, it, verifyNothing)
+	verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+	verifyIterator(t, 7, it, verifyAccount)
+	it.Release()
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	// Test after persist some bottom-most layers into the disk,
+	// the functionalities still work.
+	//db.tree.cap(common.HexToHash("0x04"), 2)
+
+	//head = db.tree.get(common.HexToHash("0x04"))
+	//verifyIterator(t, 7, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+	//
+	//it, _ = db.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+	//verifyIterator(t, 7, it, verifyAccount)
+	//it.Release()
+}
+
+// TestStorageIteratorTraversal tests simple multi-layer iteration over the
+// storage slots of a single account.
+func TestStorageIteratorTraversal(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Stack three diff layers on top with various overlaps
+	// (slots 0x01-0x03 recur in the top layer — 6 distinct slots total).
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x04", "0x05", "0x06"}}, nil), nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 0, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x02", "0x03"}}, nil), nil, nil))
+
+	// Verify the single and multi-layer iterators
+	head := db.tree.get(common.HexToHash("0x04"))
+
+	diffIter, _ := newDiffStorageIterator(common.HexToHash("0xaa"), common.Hash{}, head.(*diffLayer).states.stateSet, nil)
+	verifyIterator(t, 3, diffIter, verifyNothing)
+	verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+
+	it, _ := db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+	verifyIterator(t, 6, it, verifyStorage)
+	it.Release()
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	// Test after persist some bottom-most layers into the disk,
+	// the functionalities still work.
+	//db.tree.cap(common.HexToHash("0x04"), 2)
+	//verifyIterator(t, 6, head.(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+	//
+	//it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+	//verifyIterator(t, 6, it, verifyStorage)
+	//it.Release()
+}
+
+// TestAccountIteratorTraversalValues tests some multi-layer iteration, where we
+// also expect the correct values to show up.
+func TestAccountIteratorTraversalValues(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Create a batch of account sets to seed subsequent layers with,
+	// each layer overwriting a different subset of keys so the iterator
+	// must always surface the most recent (shallowest) value.
+	var (
+		a = make(map[common.Hash][]byte)
+		b = make(map[common.Hash][]byte)
+		c = make(map[common.Hash][]byte)
+		d = make(map[common.Hash][]byte)
+		e = make(map[common.Hash][]byte)
+		f = make(map[common.Hash][]byte)
+		g = make(map[common.Hash][]byte)
+		h = make(map[common.Hash][]byte)
+	)
+	for i := byte(2); i < 0xff; i++ {
+		a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+		if i > 20 && i%2 == 0 {
+			b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+		}
+		if i%4 == 0 {
+			c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+		}
+		if i%7 == 0 {
+			d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+		}
+		if i%8 == 0 {
+			e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+		}
+		// NOTE(review): this condition is always true for 2 <= i < 0xff
+		// (every i is either > 50 or < 85), so layer f overwrites every
+		// key; presumably `i > 50 && i < 85` was intended — confirm.
+		if i > 50 || i < 85 {
+			f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+		}
+		if i%64 == 0 {
+			g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+		}
+		if i%128 == 0 {
+			h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+		}
+	}
+	// Assemble a stack of snapshots from the account layers
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, a, nil, nil, nil))
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, b, nil, nil, nil))
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, c, nil, nil, nil))
+	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, d, nil, nil, nil))
+	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, e, nil, nil, nil))
+	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, f, nil, nil, nil))
+	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, g, nil, nil, nil))
+	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, h, nil, nil, nil))
+
+	// Cross-check every iterated value against the state reader at the head
+	it, _ := db.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+	head, _ := db.StateReader(common.HexToHash("0x09"))
+	for it.Next() {
+		hash := it.Hash()
+		want, err := head.(*reader).AccountRLP(hash)
+		if err != nil {
+			t.Fatalf("failed to retrieve expected account: %v", err)
+		}
+		if have := it.Account(); !bytes.Equal(want, have) {
+			t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+		}
+	}
+	it.Release()
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	// Test after persist some bottom-most layers into the disk,
+	// the functionalities still work.
+	//db.tree.cap(common.HexToHash("0x09"), 2)
+	//
+	//it, _ = db.AccountIterator(common.HexToHash("0x09"), common.Hash{})
+	//for it.Next() {
+	//	hash := it.Hash()
+	//	account, err := head.Account(hash)
+	//	if err != nil {
+	//		t.Fatalf("failed to retrieve expected account: %v", err)
+	//	}
+	//	want, _ := rlp.EncodeToBytes(account)
+	//	if have := it.Account(); !bytes.Equal(want, have) {
+	//		t.Fatalf("hash %x: account mismatch: have %x, want %x", hash, have, want)
+	//	}
+	//}
+	//it.Release()
+}
+
+// TestStorageIteratorTraversalValues tests multi-layer storage iteration for a
+// single account, where we also expect the correct slot values to show up.
+func TestStorageIteratorTraversalValues(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// wrapStorage attributes the given slot set to the single test account 0xaa.
+	wrapStorage := func(storage map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte {
+		return map[common.Hash]map[common.Hash][]byte{
+			common.HexToHash("0xaa"): storage,
+		}
+	}
+	// Create a batch of storage sets to seed subsequent layers with,
+	// each layer overwriting a different subset of slots.
+	var (
+		a = make(map[common.Hash][]byte)
+		b = make(map[common.Hash][]byte)
+		c = make(map[common.Hash][]byte)
+		d = make(map[common.Hash][]byte)
+		e = make(map[common.Hash][]byte)
+		f = make(map[common.Hash][]byte)
+		g = make(map[common.Hash][]byte)
+		h = make(map[common.Hash][]byte)
+	)
+	for i := byte(2); i < 0xff; i++ {
+		a[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 0, i))
+		if i > 20 && i%2 == 0 {
+			b[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 1, i))
+		}
+		if i%4 == 0 {
+			c[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 2, i))
+		}
+		if i%7 == 0 {
+			d[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 3, i))
+		}
+		if i%8 == 0 {
+			e[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 4, i))
+		}
+		// NOTE(review): this condition is always true for 2 <= i < 0xff,
+		// so layer f overwrites every slot; presumably `i > 50 && i < 85`
+		// was intended — confirm.
+		if i > 50 || i < 85 {
+			f[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 5, i))
+		}
+		if i%64 == 0 {
+			g[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 6, i))
+		}
+		if i%128 == 0 {
+			h[common.Hash{i}] = []byte(fmt.Sprintf("layer-%d, key %d", 7, i))
+		}
+	}
+	// Assemble a stack of snapshots from the account layers
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 2, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(a), nil, nil))
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 3, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(b), nil, nil))
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 4, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(c), nil, nil))
+	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 5, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(d), nil, nil))
+	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 6, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(e), nil, nil))
+	db.Update(common.HexToHash("0x07"), common.HexToHash("0x06"), 7, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(f), nil, nil))
+	db.Update(common.HexToHash("0x08"), common.HexToHash("0x07"), 8, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(g), nil, nil))
+	db.Update(common.HexToHash("0x09"), common.HexToHash("0x08"), 9, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), wrapStorage(h), nil, nil))
+
+	// Cross-check every iterated slot against the state reader at the head
+	it, _ := db.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+	head, _ := db.StateReader(common.HexToHash("0x09"))
+	for it.Next() {
+		hash := it.Hash()
+		want, err := head.Storage(common.HexToHash("0xaa"), hash)
+		if err != nil {
+			t.Fatalf("failed to retrieve expected storage slot: %v", err)
+		}
+		if have := it.Slot(); !bytes.Equal(want, have) {
+			t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+		}
+	}
+	it.Release()
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	// Test after persist some bottom-most layers into the disk,
+	// the functionalities still work.
+	//db.tree.cap(common.HexToHash("0x09"), 2)
+	//
+	//it, _ = db.StorageIterator(common.HexToHash("0x09"), common.HexToHash("0xaa"), common.Hash{})
+	//for it.Next() {
+	//	hash := it.Hash()
+	//	want, err := head.Storage(common.HexToHash("0xaa"), hash)
+	//	if err != nil {
+	//		t.Fatalf("failed to retrieve expected slot: %v", err)
+	//	}
+	//	if have := it.Slot(); !bytes.Equal(want, have) {
+	//		t.Fatalf("hash %x: slot mismatch: have %x, want %x", hash, have, want)
+	//	}
+	//}
+	//it.Release()
+}
+
+// This testcase is notorious, all layers contain the exact same 200 accounts.
+func TestAccountIteratorLargeTraversal(t *testing.T) {
+	// Create a custom account factory to recreate the same addresses
+	makeAccounts := func(num int) map[common.Hash][]byte {
+		accounts := make(map[common.Hash][]byte)
+		for i := 0; i < num; i++ {
+			h := common.Hash{}
+			binary.BigEndian.PutUint64(h[:], uint64(i+1))
+			accounts[h] = randomAccount()
+		}
+		return accounts
+	}
+	// Build up a large stack of snapshots
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+	for i := 1; i < 128; i++ {
+		// Chain each layer onto the previous one: the first layer (0x02) is
+		// built on the empty root, every later layer 0x(i+1) on root 0x(i).
+		// The original `if i == 1` inverted this, leaving layers 3+ with an
+		// EmptyRootHash parent and no 127-deep stack to iterate.
+		parent := types.EmptyRootHash
+		if i != 1 {
+			parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
+		}
+		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(),
+			NewStateSetWithOrigin(nil, makeAccounts(200), nil, nil, nil))
+	}
+	// Iterate the entire stack and ensure everything is hit only once
+	head := db.tree.get(common.HexToHash("0x80"))
+	verifyIterator(t, 200, newDiffAccountIterator(common.Hash{}, head.(*diffLayer).states.stateSet, nil), verifyNothing)
+	verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+
+	it, _ := db.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+	verifyIterator(t, 200, it, verifyAccount)
+	it.Release()
+
+	// TODO reenable these tests once the persistent state iteration
+	// is implemented.
+
+	// Test after persist some bottom-most layers into the disk,
+	// the functionalities still work.
+	//db.tree.cap(common.HexToHash("0x80"), 2)
+	//
+	//verifyIterator(t, 200, head.(*diffLayer).newBinaryAccountIterator(), verifyAccount)
+	//
+	//it, _ = db.AccountIterator(common.HexToHash("0x80"), common.Hash{})
+	//verifyIterator(t, 200, it, verifyAccount)
+	//it.Release()
+}
+
+// TestAccountIteratorFlattening tests what happens when we
+// - have a live iterator on child C (parent C1 -> C2 .. CN)
+// - flattens C2 all the way into CN
+// - continues iterating
+func TestAccountIteratorFlattening(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Create a stack of diffs on top
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+
+	// Create an iterator and flatten the data from underneath it
+	it, _ := db.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+	defer it.Release()
+
+	// Currently only checks that capping the tree under a live iterator
+	// does not error; the post-flatten traversal check remains disabled.
+	if err := db.tree.cap(common.HexToHash("0x04"), 1); err != nil {
+		t.Fatalf("failed to flatten snapshot stack: %v", err)
+	}
+	//verifyIterator(t, 7, it)
+}
+
+// TestAccountIteratorSeek tests that the account iterator can be positioned
+// at an arbitrary starting account hash. Per the expectations below, the seek
+// position is inclusive: seeking to an existing hash yields that hash first.
+func TestAccountIteratorSeek(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa", "0xee", "0xff", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xbb", "0xdd", "0xf0"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xcc", "0xf0", "0xff"), nil, nil, nil))
+
+	// Account set is now
+	// 02: aa, ee, f0, ff
+	// 03: aa, bb, dd, ee, f0 (, f0), ff
+	// 04: aa, bb, cc, dd, ee, f0 (, f0), ff (, ff)
+	// Construct various iterators and ensure their traversal is correct
+	it, _ := db.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xdd"))
+	defer it.Release()
+	verifyIterator(t, 3, it, verifyAccount) // expected: ee, f0, ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"))
+	defer it.Release()
+	verifyIterator(t, 4, it, verifyAccount) // expected: aa, ee, f0, ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff"))
+	defer it.Release()
+	verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+	// Seeking past the last account ("0xff1" sorts after "0xff...") is empty.
+	it, _ = db.AccountIterator(common.HexToHash("0x02"), common.HexToHash("0xff1"))
+	defer it.Release()
+	verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xbb"))
+	defer it.Release()
+	verifyIterator(t, 6, it, verifyAccount) // expected: bb, cc, dd, ee, f0, ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xef"))
+	defer it.Release()
+	verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xf0"))
+	defer it.Release()
+	verifyIterator(t, 2, it, verifyAccount) // expected: f0, ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff"))
+	defer it.Release()
+	verifyIterator(t, 1, it, verifyAccount) // expected: ff
+
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.HexToHash("0xff1"))
+	defer it.Release()
+	verifyIterator(t, 0, it, verifyAccount) // expected: nothing
+}
+
+// TestStorageIteratorSeek tests that the storage iterator can be positioned
+// at an arbitrary starting slot hash, both for existing and missing slots.
+func TestStorageIteratorSeek(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Stack three diff layers on top with various overlaps
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x05", "0x06"}}, nil), nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x05", "0x08"}}, nil), nil, nil))
+
+	// Storage slots of account 0xaa are now
+	// 02: 01, 03, 05
+	// 03: 01, 02, 03, 05 (, 05), 06
+	// 04: 01(, 01), 02, 03, 05(, 05, 05), 06, 08
+	// Construct various iterators and ensure their traversal is correct
+	it, _ := db.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+	defer it.Release()
+	verifyIterator(t, 3, it, verifyStorage) // expected: 01, 03, 05
+
+	it, _ = db.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x02"))
+	defer it.Release()
+	verifyIterator(t, 2, it, verifyStorage) // expected: 03, 05
+
+	it, _ = db.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x5"))
+	defer it.Release()
+	verifyIterator(t, 1, it, verifyStorage) // expected: 05
+
+	it, _ = db.StorageIterator(common.HexToHash("0x02"), common.HexToHash("0xaa"), common.HexToHash("0x6"))
+	defer it.Release()
+	verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+
+	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x01"))
+	defer it.Release()
+	verifyIterator(t, 6, it, verifyStorage) // expected: 01, 02, 03, 05, 06, 08
+
+	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x05"))
+	defer it.Release()
+	verifyIterator(t, 3, it, verifyStorage) // expected: 05, 06, 08
+
+	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x08"))
+	defer it.Release()
+	verifyIterator(t, 1, it, verifyStorage) // expected: 08
+
+	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.HexToHash("0x09"))
+	defer it.Release()
+	verifyIterator(t, 0, it, verifyStorage) // expected: nothing
+}
+
+// TestAccountIteratorDeletions tests that the iterator behaves correct when there are
+// deleted accounts (where the Account() value is nil). The iterator
+// should not output any accounts or nil-values for those cases.
+func TestAccountIteratorDeletions(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Stack three diff layers on top with various overlaps
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0x11", "0x22", "0x33"), nil, nil, nil))
+
+	// Mark 0x22 as destructed in the middle layer; it must never be
+	// surfaced by iterators constructed on top of it.
+	deleted := common.HexToHash("0x22")
+	destructed := map[common.Hash]struct{}{
+		deleted: {},
+	}
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(destructed, randomAccountSet("0x11", "0x33"), nil, nil, nil))
+
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0x33", "0x44", "0x55"), nil, nil, nil))
+
+	// The output should be 11,33,44,55
+	it, _ := db.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+	// Do a quick check
+	verifyIterator(t, 4, it, verifyAccount)
+	it.Release()
+
+	// And a more detailed verification that we indeed do not see '0x22'
+	it, _ = db.AccountIterator(common.HexToHash("0x04"), common.Hash{})
+	defer it.Release()
+	for it.Next() {
+		hash := it.Hash()
+		if it.Account() == nil {
+			t.Errorf("iterator returned nil-value for hash %x", hash)
+		}
+		if hash == deleted {
+			t.Errorf("expected deleted elem %x to not be returned by iterator", deleted)
+		}
+	}
+}
+
+// TestStorageIteratorDeletions tests that the storage iterator correctly
+// skips overwritten/deleted slots, and that whole-account destruction hides
+// all prior slots — including when the account is re-created afterwards,
+// either in a later layer or within the very same layer.
+func TestStorageIteratorDeletions(t *testing.T) {
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// Stack three diff layers on top with various overlaps
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x01", "0x03", "0x05"}}, nil), nil, nil))
+
+	// Second layer adds 02,04,06 and deletes slots 01 and 03.
+	db.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), 2, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x02", "0x04", "0x06"}}, [][]string{{"0x01", "0x03"}}), nil, nil))
+
+	// The output should be 02,04,05,06
+	it, _ := db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.Hash{})
+	verifyIterator(t, 4, it, verifyStorage)
+	it.Release()
+
+	// The output should be 04,05,06
+	it, _ = db.StorageIterator(common.HexToHash("0x03"), common.HexToHash("0xaa"), common.HexToHash("0x03"))
+	verifyIterator(t, 3, it, verifyStorage)
+	it.Release()
+
+	// Destruct the whole storage
+	destructed := map[common.Hash]struct{}{
+		common.HexToHash("0xaa"): {},
+	}
+	db.Update(common.HexToHash("0x04"), common.HexToHash("0x03"), 3, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(destructed, nil, nil, nil, nil))
+
+	it, _ = db.StorageIterator(common.HexToHash("0x04"), common.HexToHash("0xaa"), common.Hash{})
+	verifyIterator(t, 0, it, verifyStorage)
+	it.Release()
+
+	// Re-insert the slots of the same account
+	db.Update(common.HexToHash("0x05"), common.HexToHash("0x04"), 4, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(nil, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x07", "0x08", "0x09"}}, nil), nil, nil))
+
+	// The output should be 07,08,09
+	it, _ = db.StorageIterator(common.HexToHash("0x05"), common.HexToHash("0xaa"), common.Hash{})
+	verifyIterator(t, 3, it, verifyStorage)
+	it.Release()
+
+	// Destruct the whole storage but re-create the account in the same layer
+	db.Update(common.HexToHash("0x06"), common.HexToHash("0x05"), 5, trienode.NewMergedNodeSet(),
+		NewStateSetWithOrigin(destructed, randomAccountSet("0xaa"), randomStorageSet([]string{"0xaa"}, [][]string{{"0x11", "0x12"}}, nil), nil, nil))
+
+	it, _ = db.StorageIterator(common.HexToHash("0x06"), common.HexToHash("0xaa"), common.Hash{})
+	verifyIterator(t, 2, it, verifyStorage) // The output should be 11,12
+	it.Release()
+
+	// Cross-check the fast iterator against the binary reference iterator.
+	verifyIterator(t, 2, db.tree.get(common.HexToHash("0x06")).(*diffLayer).newBinaryStorageIterator(common.HexToHash("0xaa")), verifyStorage)
+}
+
+// BenchmarkAccountIteratorTraversal is a bit notorious -- all layers contain the
+// exact same 200 accounts. That means that we need to process 2000 items, but
+// only spit out 200 values eventually.
+//
+// The value-fetching benchmark is easy on the binary iterator, since it never has to reach
+// down at any depth for retrieving the values -- all are on the topmost layer
+//
+// BenchmarkAccountIteratorTraversal/binary_iterator_keys-8 759984 1566 ns/op
+// BenchmarkAccountIteratorTraversal/binary_iterator_values-8 150028 7900 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_keys-8 172809 7006 ns/op
+// BenchmarkAccountIteratorTraversal/fast_iterator_values-8 165112 7658 ns/op
+func BenchmarkAccountIteratorTraversal(b *testing.B) {
+	// Create a custom account factory to recreate the same addresses
+	makeAccounts := func(num int) map[common.Hash][]byte {
+		accounts := make(map[common.Hash][]byte)
+		for i := 0; i < num; i++ {
+			h := common.Hash{}
+			binary.BigEndian.PutUint64(h[:], uint64(i+1))
+			accounts[h] = randomAccount()
+		}
+		return accounts
+	}
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	for i := 1; i <= 100; i++ {
+		// The first diff layer is rooted in the empty base; every later
+		// layer must link to the one created just before it so that a
+		// 100-deep stack is actually built.
+		parent := types.EmptyRootHash
+		if i != 1 {
+			parent = common.HexToHash(fmt.Sprintf("0x%02x", i))
+		}
+		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), parent, uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, makeAccounts(200), nil, nil, nil))
+	}
+	// We call this once before the benchmark, so the creation of
+	// sorted accountlists are not included in the results.
+	head := db.tree.get(common.HexToHash("0x65"))
+	head.(*diffLayer).newBinaryAccountIterator()
+
+	b.Run("binary iterator keys", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			got := 0
+			it := head.(*diffLayer).newBinaryAccountIterator()
+			for it.Next() {
+				got++
+			}
+			if exp := 200; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("binary iterator values", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			got := 0
+			it := head.(*diffLayer).newBinaryAccountIterator()
+			for it.Next() {
+				got++
+				head.(*diffLayer).account(it.Hash(), 0)
+			}
+			if exp := 200; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("fast iterator keys", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			it, _ := db.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+			got := 0
+			for it.Next() {
+				got++
+			}
+			// Release eagerly: a defer here would accumulate b.N pending
+			// releases until the sub-benchmark function returns.
+			it.Release()
+			if exp := 200; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("fast iterator values", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			it, _ := db.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+			got := 0
+			for it.Next() {
+				got++
+				it.Account()
+			}
+			// Release eagerly instead of deferring (see above).
+			it.Release()
+			if exp := 200; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+}
+
+// BenchmarkAccountIteratorLargeBaselayer is a pretty realistic benchmark, where
+// the baselayer is a lot larger than the upper layer.
+//
+// This is heavy on the binary iterator, which in most cases will have to
+// call recursively 100 times for the majority of the values
+//
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(keys)-6 514 1971999 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/binary_iterator_(values)-6 61 18997492 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(keys)-6 10000 114385 ns/op
+// BenchmarkAccountIteratorLargeBaselayer/fast_iterator_(values)-6 4047 296823 ns/op
+func BenchmarkAccountIteratorLargeBaselayer(b *testing.B) {
+	// Create a custom account factory to recreate the same addresses
+	makeAccounts := func(num int) map[common.Hash][]byte {
+		accounts := make(map[common.Hash][]byte)
+		for i := 0; i < num; i++ {
+			h := common.Hash{}
+			binary.BigEndian.PutUint64(h[:], uint64(i+1))
+			accounts[h] = randomAccount()
+		}
+		return accounts
+	}
+	config := &Config{
+		WriteBufferSize: 0,
+	}
+	db := New(rawdb.NewMemoryDatabase(), config, false)
+	// db.WaitGeneration()
+
+	// One fat bottom layer (2000 accounts), then 99 thin layers of 20 each.
+	db.Update(common.HexToHash("0x02"), types.EmptyRootHash, 1, trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, makeAccounts(2000), nil, nil, nil))
+	for i := 2; i <= 100; i++ {
+		db.Update(common.HexToHash(fmt.Sprintf("0x%02x", i+1)), common.HexToHash(fmt.Sprintf("0x%02x", i)), uint64(i), trienode.NewMergedNodeSet(), NewStateSetWithOrigin(nil, makeAccounts(20), nil, nil, nil))
+	}
+	// We call this once before the benchmark, so the creation of
+	// sorted accountlists are not included in the results.
+	head := db.tree.get(common.HexToHash("0x65"))
+	head.(*diffLayer).newBinaryAccountIterator()
+
+	b.Run("binary iterator (keys)", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			got := 0
+			it := head.(*diffLayer).newBinaryAccountIterator()
+			for it.Next() {
+				got++
+			}
+			if exp := 2000; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("binary iterator (values)", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			got := 0
+			it := head.(*diffLayer).newBinaryAccountIterator()
+			for it.Next() {
+				got++
+				v := it.Hash()
+				head.(*diffLayer).account(v, 0)
+			}
+			if exp := 2000; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("fast iterator (keys)", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			it, _ := db.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+			got := 0
+			for it.Next() {
+				got++
+			}
+			// Release eagerly: a defer here would accumulate b.N pending
+			// releases until the sub-benchmark function returns.
+			it.Release()
+			if exp := 2000; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+	b.Run("fast iterator (values)", func(b *testing.B) {
+		for i := 0; i < b.N; i++ {
+			it, _ := db.AccountIterator(common.HexToHash("0x65"), common.Hash{})
+			got := 0
+			for it.Next() {
+				it.Account()
+				got++
+			}
+			// Release eagerly instead of deferring (see above).
+			it.Release()
+			if exp := 2000; got != exp {
+				b.Errorf("iterator len wrong, expected %d, got %d", exp, got)
+			}
+		}
+	})
+}
+
+/*
+func BenchmarkBinaryAccountIteration(b *testing.B) {
+ benchmarkAccountIteration(b, func(snap snapshot) AccountIterator {
+ return snap.(*diffLayer).newBinaryAccountIterator()
+ })
+}
+
+func BenchmarkFastAccountIteration(b *testing.B) {
+ benchmarkAccountIteration(b, newFastAccountIterator)
+}
+
+func benchmarkAccountIteration(b *testing.B, iterator func(snap snapshot) AccountIterator) {
+ // Create a diff stack and randomize the accounts across them
+ layers := make([]map[common.Hash][]byte, 128)
+ for i := 0; i < len(layers); i++ {
+ layers[i] = make(map[common.Hash][]byte)
+ }
+ for i := 0; i < b.N; i++ {
+ depth := rand.Intn(len(layers))
+ layers[depth][randomHash()] = randomAccount()
+ }
+ stack := snapshot(emptyLayer())
+ for _, layer := range layers {
+ stack = stack.Update(common.Hash{}, layer, nil, nil)
+ }
+ // Reset the timers and report all the stats
+ it := iterator(stack)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for it.Next() {
+ }
+}
+*/
diff --git a/triedb/pathdb/reader.go b/triedb/pathdb/reader.go
index 3067a0c1af..b610e0d0c1 100644
--- a/triedb/pathdb/reader.go
+++ b/triedb/pathdb/reader.go
@@ -86,6 +86,17 @@ func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte,
return blob, nil
}
+// AccountRLP directly retrieves the account associated with a particular hash.
+// An error will be returned if the read operation exits abnormally. Specifically,
+// if the layer is already stale.
+//
+// Note:
+// - the returned account data is not a copy, please don't modify it
+// - no error will be returned if the requested account is not found in database
+func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
+	// The trailing 0 is the starting layer depth passed to layer.account —
+	// presumably used for depth bookkeeping; confirm against its definition.
+	return r.layer.account(hash, 0)
+}
+
// Account directly retrieves the account associated with a particular hash in
// the slim data format. An error will be returned if the read operation exits
// abnormally. Specifically, if the layer is already stale.
diff --git a/triedb/pathdb/states.go b/triedb/pathdb/states.go
index 2210e20538..ca58a05c28 100644
--- a/triedb/pathdb/states.go
+++ b/triedb/pathdb/states.go
@@ -192,8 +192,6 @@ func (s *stateSet) check() uint64 {
// the deleted ones.
//
// Note, the returned slice is not a copy, so do not modify it.
-//
-//nolint:unused
func (s *stateSet) accountList() []common.Hash {
// If an old list already exists, return it
s.lock.RLock()
@@ -229,8 +227,6 @@ func (s *stateSet) accountList() []common.Hash {
// not empty but the flag is true.
//
// Note, the returned slice is not a copy, so do not modify it.
-//
-//nolint:unused
func (s *stateSet) storageList(accountHash common.Hash) ([]common.Hash, bool) {
s.lock.RLock()
_, destructed := s.destructSet[accountHash]