mirror of https://github.com/ethereum/go-ethereum
core, eth/protocols/snap, trie: fix cause for snap-sync corruption, implement gentrie (#29313)
This pull request defines a gentrie for snap sync purposes. The stackTrie is used to generate the merkle tree nodes upon receiving a state batch. Several additional options had been added into stackTrie to handle incomplete states (either missing states before or after). In this pull request, these options have been relocated from stackTrie to genTrie, which serves as a wrapper for stackTrie specifically for snap sync purposes. Furthermore, the logic for managing incomplete state has been enhanced in this change. Originally, two cases were handled: boundary node filtering, and clearing of internal nodes (those covered by an extension node). This change adds one more: clearing leftover nodes on the boundaries. This feature is necessary because, if there are leftover trie nodes in the database, node inconsistency may break the state healing.
parent
ef5ac3fb7a
commit
d3c4466edd
@ -0,0 +1,287 @@ |
|||||||
|
// Copyright 2024 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package snap |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
// genTrie interface is used by the snap syncer to generate merkle tree nodes
// based on a received batch of states.
//
// Implementations in this file: pathTrie (path-scheme node layout) and
// hashTrie (legacy hash-scheme node layout).
type genTrie interface {
	// update inserts the state item into generator trie.
	update(key, value []byte) error

	// commit flushes the right boundary nodes if complete flag is true. This
	// function must be called before flushing the associated database batch.
	commit(complete bool) common.Hash
}
||||||
|
|
||||||
|
// pathTrie is a wrapper over the stackTrie, incorporating numerous additional
// logics to handle the semi-completed trie and potential leftover dangling
// nodes in the database. It is utilized for constructing the merkle tree nodes
// in path mode during the snap sync process.
type pathTrie struct {
	owner common.Hash     // identifier of trie owner, empty for account trie
	tr    *trie.StackTrie // underlying raw stack trie
	first []byte          // the path of first committed node by stackTrie
	last  []byte          // the path of last committed node by stackTrie

	// This flag indicates whether nodes on the left boundary are skipped for
	// committing. If set, the left boundary nodes are considered incomplete
	// due to potentially missing left children.
	skipLeftBoundary bool
	db               ethdb.KeyValueReader // database for probing leftover dangling nodes
	batch            ethdb.Batch          // batch collecting node writes and deletions
}
||||||
|
|
||||||
|
// newPathTrie initializes the path trie.
|
||||||
|
func newPathTrie(owner common.Hash, skipLeftBoundary bool, db ethdb.KeyValueReader, batch ethdb.Batch) *pathTrie { |
||||||
|
tr := &pathTrie{ |
||||||
|
owner: owner, |
||||||
|
skipLeftBoundary: skipLeftBoundary, |
||||||
|
db: db, |
||||||
|
batch: batch, |
||||||
|
} |
||||||
|
tr.tr = trie.NewStackTrie(tr.onTrieNode) |
||||||
|
return tr |
||||||
|
} |
||||||
|
|
||||||
|
// onTrieNode is invoked whenever a new node is committed by the stackTrie.
//
// As the committed nodes might be incomplete if they are on the boundaries
// (left or right), this function has the ability to detect the incomplete
// ones and filter them out for committing.
//
// Additionally, the assumption is made that there may exist leftover dangling
// nodes in the database. This function has the ability to detect the dangling
// nodes that fall within the path space of committed nodes (specifically on
// the path covered by internal extension nodes) and remove them from the
// database. This property ensures that the entire path space is uniquely
// occupied by committed nodes.
//
// Furthermore, all leftover dangling nodes along the path from committed nodes
// to the trie root (left and right boundaries) should be removed as well;
// otherwise, they might potentially disrupt the state healing process.
func (t *pathTrie) onTrieNode(path []byte, hash common.Hash, blob []byte) {
	// Filter out the nodes on the left boundary if skipLeftBoundary is
	// configured. Nodes are considered to be on the left boundary if
	// it's the first one to be committed, or the parent/ancestor of the
	// first committed node.
	if t.skipLeftBoundary && (t.first == nil || bytes.HasPrefix(t.first, path)) {
		if t.first == nil {
			// Memorize the path of first committed node, which is regarded
			// as left boundary. Deep-copy is necessary as the path given
			// is volatile.
			t.first = append([]byte{}, path...)

			// The left boundary can be uniquely determined by the first committed node
			// from stackTrie (e.g., N_1), as the shared path prefix between the first
			// two inserted state items is deterministic (the path of N_3). The path
			// from trie root towards the first committed node is considered the left
			// boundary. The potential leftover dangling nodes on left boundary should
			// be cleaned out.
			//
			//                            +-----+
			//                            | N_3 | shared path prefix of state_1 and state_2
			//                            +-----+
			//                            /-   -\
			//                      +-----+     +-----+
			// First committed node | N_1 |     | N_2 | latest inserted node (contain state_2)
			//                      +-----+     +-----+
			//
			// The node with the path of the first committed one (e.g, N_1) is not
			// removed because it's a sibling of the nodes we want to commit, not
			// the parent or ancestor.
			for i := 0; i < len(path); i++ {
				t.delete(path[:i], false)
			}
		}
		return
	}
	// If boundary filtering is not configured, or the node is not on the left
	// boundary, commit it to database.
	//
	// Note: If the current committed node is an extension node, then the nodes
	// falling within the path between itself and its standalone (not embedded
	// in parent) child should be cleaned out for exclusively occupy the inner
	// path.
	//
	// This is essential in snap sync to avoid leaving dangling nodes within
	// this range covered by extension node which could potentially break the
	// state healing.
	//
	// The extension node is detected if its path is the prefix of last committed
	// one and path gap is larger than one. If the path gap is only one byte,
	// the current node could either be a full node, or a extension with single
	// byte key. In either case, no gaps will be left in the path.
	if t.last != nil && bytes.HasPrefix(t.last, path) && len(t.last)-len(path) > 1 {
		for i := len(path) + 1; i < len(t.last); i++ {
			t.delete(t.last[:i], true)
		}
	}
	t.write(path, blob)

	// Update the last flag. Deep-copy is necessary as the provided path is volatile.
	if t.last == nil {
		t.last = append([]byte{}, path...)
	} else {
		t.last = append(t.last[:0], path...)
	}
}
||||||
|
|
||||||
|
// write commits the node write to provided database batch in path mode.
|
||||||
|
func (t *pathTrie) write(path []byte, blob []byte) { |
||||||
|
if t.owner == (common.Hash{}) { |
||||||
|
rawdb.WriteAccountTrieNode(t.batch, path, blob) |
||||||
|
} else { |
||||||
|
rawdb.WriteStorageTrieNode(t.batch, t.owner, path, blob) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
func (t *pathTrie) deleteAccountNode(path []byte, inner bool) { |
||||||
|
if inner { |
||||||
|
accountInnerLookupGauge.Inc(1) |
||||||
|
} else { |
||||||
|
accountOuterLookupGauge.Inc(1) |
||||||
|
} |
||||||
|
if !rawdb.ExistsAccountTrieNode(t.db, path) { |
||||||
|
return |
||||||
|
} |
||||||
|
if inner { |
||||||
|
accountInnerDeleteGauge.Inc(1) |
||||||
|
} else { |
||||||
|
accountOuterDeleteGauge.Inc(1) |
||||||
|
} |
||||||
|
rawdb.DeleteAccountTrieNode(t.batch, path) |
||||||
|
} |
||||||
|
|
||||||
|
func (t *pathTrie) deleteStorageNode(path []byte, inner bool) { |
||||||
|
if inner { |
||||||
|
storageInnerLookupGauge.Inc(1) |
||||||
|
} else { |
||||||
|
storageOuterLookupGauge.Inc(1) |
||||||
|
} |
||||||
|
if !rawdb.ExistsStorageTrieNode(t.db, t.owner, path) { |
||||||
|
return |
||||||
|
} |
||||||
|
if inner { |
||||||
|
storageInnerDeleteGauge.Inc(1) |
||||||
|
} else { |
||||||
|
storageOuterDeleteGauge.Inc(1) |
||||||
|
} |
||||||
|
rawdb.DeleteStorageTrieNode(t.batch, t.owner, path) |
||||||
|
} |
||||||
|
|
||||||
|
// delete commits the node deletion to provided database batch in path mode.
|
||||||
|
func (t *pathTrie) delete(path []byte, inner bool) { |
||||||
|
if t.owner == (common.Hash{}) { |
||||||
|
t.deleteAccountNode(path, inner) |
||||||
|
} else { |
||||||
|
t.deleteStorageNode(path, inner) |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// update implements genTrie interface, inserting a (key, value) pair into the
// stack trie.
//
// NOTE(review): the underlying stackTrie presumably requires keys in strictly
// ascending order — confirm against trie.StackTrie documentation.
func (t *pathTrie) update(key, value []byte) error {
	return t.tr.Update(key, value)
}
||||||
|
|
||||||
|
// commit implements genTrie interface, flushing the right boundary if it's
// considered as complete. Otherwise, the nodes on the right boundary are
// discarded and cleaned up.
//
// Note, this function must be called before flushing database batch, otherwise,
// dangling nodes might be left in database.
func (t *pathTrie) commit(complete bool) common.Hash {
	// If the right boundary is claimed as complete, flush them out.
	// The nodes on both left and right boundary will still be filtered
	// out if left boundary filtering is configured.
	if complete {
		// Commit all inserted but not yet committed nodes(on the right
		// boundary) in the stackTrie.
		hash := t.tr.Hash()
		if t.skipLeftBoundary {
			return common.Hash{} // hash is meaningless if left side is incomplete
		}
		return hash
	}
	// Discard nodes on the right boundary as it's claimed as incomplete. These
	// nodes might be incomplete due to missing children on the right side.
	// Furthermore, the potential leftover nodes on right boundary should also
	// be cleaned out.
	//
	// The right boundary can be uniquely determined by the last committed node
	// from stackTrie (e.g., N_1), as the shared path prefix between the last
	// two inserted state items is deterministic (the path of N_3). The path
	// from trie root towards the last committed node is considered the right
	// boundary (root to N_3).
	//
	//                            +-----+
	//                            | N_3 | shared path prefix of last two states
	//                            +-----+
	//                            /-   -\
	//                      +-----+     +-----+
	// Last committed node  | N_1 |     | N_2 | latest inserted node (contain last state)
	//                      +-----+     +-----+
	//
	// Another interesting scenario occurs when the trie is committed due to
	// too many items being accumulated in the batch. To flush them out to
	// the database, the path of the last inserted node (N_2) is temporarily
	// treated as an incomplete right boundary, and nodes on this path are
	// removed (e.g. from root to N_3).
	// However, this path will be reclaimed as an internal path by inserting
	// more items after the batch flush. New nodes on this path can be committed
	// with no issues as they are actually complete. Also, from a database
	// perspective, first deleting and then rewriting is a valid data update.
	for i := 0; i < len(t.last); i++ {
		t.delete(t.last[:i], false)
	}
	return common.Hash{} // the hash is meaningless for incomplete commit
}
||||||
|
|
||||||
|
// hashTrie is a wrapper over the stackTrie for implementing genTrie interface.
// It emits nodes keyed by hash (legacy scheme), so no boundary filtering or
// dangling-node clearing is needed.
type hashTrie struct {
	tr *trie.StackTrie // underlying raw stack trie
}
||||||
|
|
||||||
|
// newHashTrie initializes the hash trie.
|
||||||
|
func newHashTrie(batch ethdb.Batch) *hashTrie { |
||||||
|
return &hashTrie{tr: trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) { |
||||||
|
rawdb.WriteLegacyTrieNode(batch, hash, blob) |
||||||
|
})} |
||||||
|
} |
||||||
|
|
||||||
|
// update implements genTrie interface, inserting a (key, value) pair into
// the stack trie.
func (t *hashTrie) update(key, value []byte) error {
	return t.tr.Update(key, value)
}
||||||
|
|
||||||
|
// commit implements genTrie interface, committing the nodes on right boundary.
|
||||||
|
func (t *hashTrie) commit(complete bool) common.Hash { |
||||||
|
if !complete { |
||||||
|
return common.Hash{} // the hash is meaningless for incomplete commit
|
||||||
|
} |
||||||
|
return t.tr.Hash() // return hash only if it's claimed as complete
|
||||||
|
} |
@ -0,0 +1,553 @@ |
|||||||
|
// Copyright 2024 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package snap |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"math/rand" |
||||||
|
"slices" |
||||||
|
"testing" |
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common" |
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb" |
||||||
|
"github.com/ethereum/go-ethereum/crypto" |
||||||
|
"github.com/ethereum/go-ethereum/ethdb" |
||||||
|
"github.com/ethereum/go-ethereum/internal/testrand" |
||||||
|
"github.com/ethereum/go-ethereum/trie" |
||||||
|
) |
||||||
|
|
||||||
|
// replayer is a batch-replay target that records every trie-node mutation
// (write or deletion) applied to a database batch, for later inspection by
// the tests. It implements the Put/Delete interface expected by batch.Replay.
type replayer struct {
	paths    []string      // sort in fifo order
	hashes   []common.Hash // empty for deletion
	unknowns int           // counter for unknown write
}
||||||
|
|
||||||
|
// newBatchReplay constructs an empty replayer ready to receive batch.Replay
// callbacks.
func newBatchReplay() *replayer {
	return &replayer{}
}
||||||
|
|
||||||
|
// decode classifies a single batch entry and records its effect: the node
// path (owner-prefixed for storage tries) plus either the blob's hash for a
// write or the zero hash for a deletion. Keys that are neither account nor
// storage trie nodes only bump the unknown counter.
func (r *replayer) decode(key []byte, value []byte) {
	account := rawdb.IsAccountTrieNode(key)
	storage := rawdb.IsStorageTrieNode(key)
	if !account && !storage {
		r.unknowns += 1
		return
	}
	var path []byte
	if account {
		_, path = rawdb.ResolveAccountTrieNodeKey(key)
	} else {
		// Prefix storage paths with the owner so account and storage
		// namespaces cannot collide in the recorded path list.
		_, owner, inner := rawdb.ResolveStorageTrieNode(key)
		path = append(owner.Bytes(), inner...)
	}
	r.paths = append(r.paths, string(path))

	// An empty value denotes a deletion; record the zero hash for it.
	if len(value) == 0 {
		r.hashes = append(r.hashes, common.Hash{})
	} else {
		r.hashes = append(r.hashes, crypto.Keccak256Hash(value))
	}
}
||||||
|
|
||||||
|
// updates returns a set of effective mutations. Multiple mutations targeting
|
||||||
|
// the same node path will be merged in FIFO order.
|
||||||
|
func (r *replayer) modifies() map[string]common.Hash { |
||||||
|
set := make(map[string]common.Hash) |
||||||
|
for i, path := range r.paths { |
||||||
|
set[path] = r.hashes[i] |
||||||
|
} |
||||||
|
return set |
||||||
|
} |
||||||
|
|
||||||
|
// updates returns the number of updates.
|
||||||
|
func (r *replayer) updates() int { |
||||||
|
var count int |
||||||
|
for _, hash := range r.modifies() { |
||||||
|
if hash == (common.Hash{}) { |
||||||
|
continue |
||||||
|
} |
||||||
|
count++ |
||||||
|
} |
||||||
|
return count |
||||||
|
} |
||||||
|
|
||||||
|
// Put inserts the given value into the key-value data store. For the
// replayer, it merely records the write instead of persisting it.
func (r *replayer) Put(key []byte, value []byte) error {
	r.decode(key, value)
	return nil
}
||||||
|
|
||||||
|
// Delete removes the key from the key-value data store. For the replayer, it
// merely records the deletion (decode treats a nil value as a delete).
func (r *replayer) Delete(key []byte) error {
	r.decode(key, nil)
	return nil
}
||||||
|
|
||||||
|
// byteToHex expands the supplied key bytes into nibble form, two nibbles per
// byte with the high nibble first. No terminator nibble is appended.
func byteToHex(str []byte) []byte {
	nibbles := make([]byte, 0, len(str)*2)
	for _, b := range str {
		nibbles = append(nibbles, b>>4, b&0x0f)
	}
	return nibbles
}
||||||
|
|
||||||
|
// innerNodes returns the internal nodes narrowed by two boundaries along with
// the leftmost and rightmost sub-trie roots. The boundaries are given as raw
// keys and converted to nibble paths for comparison with node paths; the
// include flags decide whether nodes on/under each boundary path are kept.
func innerNodes(first, last []byte, includeLeft, includeRight bool, nodes map[string]common.Hash, t *testing.T) (map[string]common.Hash, []byte, []byte) {
	var (
		leftRoot  []byte
		rightRoot []byte
		firstHex  = byteToHex(first)
		lastHex   = byteToHex(last)
		inner     = make(map[string]common.Hash)
	)
	for path, hash := range nodes {
		if hash == (common.Hash{}) {
			t.Fatalf("Unexpected deletion, %v", []byte(path))
		}
		// Filter out the siblings on the left side or the left boundary nodes.
		if !includeLeft && (bytes.Compare(firstHex, []byte(path)) > 0 || bytes.HasPrefix(firstHex, []byte(path))) {
			continue
		}
		// Filter out the siblings on the right side or the right boundary nodes.
		if !includeRight && (bytes.Compare(lastHex, []byte(path)) < 0 || bytes.HasPrefix(lastHex, []byte(path))) {
			continue
		}
		inner[path] = hash

		// Track the path of the leftmost sub trie root
		if leftRoot == nil || bytes.Compare(leftRoot, []byte(path)) > 0 {
			leftRoot = []byte(path)
		}
		// Track the path of the rightmost sub trie root. A lexically smaller
		// path that is a prefix of the current candidate is the candidate's
		// ancestor, hence a higher (more rootward) sub-trie root.
		if rightRoot == nil ||
			(bytes.Compare(rightRoot, []byte(path)) < 0) ||
			(bytes.Compare(rightRoot, []byte(path)) > 0 && bytes.HasPrefix(rightRoot, []byte(path))) {
			rightRoot = []byte(path)
		}
	}
	return inner, leftRoot, rightRoot
}
||||||
|
|
||||||
|
// buildPartial constructs a path trie over entries[first..last] (inclusive),
// commits it into the supplied batch, and replays the batch into a fresh
// replayer for inspection. The left boundary is treated as incomplete unless
// first == 0; the right boundary as complete only when last reaches the end.
//
// NOTE(review): update/commit/Replay errors are deliberately ignored in this
// test helper.
func buildPartial(owner common.Hash, db ethdb.KeyValueReader, batch ethdb.Batch, entries []*kv, first, last int) *replayer {
	tr := newPathTrie(owner, first != 0, db, batch)
	for i := first; i <= last; i++ {
		tr.update(entries[i].k, entries[i].v)
	}
	tr.commit(last == len(entries)-1)

	replay := newBatchReplay()
	batch.Replay(replay)

	return replay
}
||||||
|
|
||||||
|
// TestPartialGentree verifies if the trie constructed with partial states can
// generate consistent trie nodes that match those of the full trie.
func TestPartialGentree(t *testing.T) {
	for round := 0; round < 100; round++ {
		// Generate a random, sorted set of key/value entries; short values
		// exercise embedded (inline) nodes, 32-byte values standalone nodes.
		var (
			n       = rand.Intn(1024) + 10
			entries []*kv
		)
		for i := 0; i < n; i++ {
			var val []byte
			if rand.Intn(3) == 0 {
				val = testrand.Bytes(3)
			} else {
				val = testrand.Bytes(32)
			}
			entries = append(entries, &kv{
				k: testrand.Bytes(32),
				v: val,
			})
		}
		slices.SortFunc(entries, (*kv).cmp)

		// Build the reference full trie, recording every committed node.
		nodes := make(map[string]common.Hash)
		tr := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
			nodes[string(path)] = hash
		})
		for i := 0; i < len(entries); i++ {
			tr.Update(entries[i].k, entries[i].v)
		}
		tr.Hash()

		// check builds a partial trie over [first, last] and asserts its
		// committed nodes exactly equal the reference trie's inner nodes.
		check := func(first, last int) {
			var (
				db    = rawdb.NewMemoryDatabase()
				batch = db.NewBatch()
			)
			// Build the partial tree with specific boundaries
			r := buildPartial(common.Hash{}, db, batch, entries, first, last)
			if r.unknowns > 0 {
				t.Fatalf("Unknown database write: %d", r.unknowns)
			}

			// Ensure all the internal nodes are produced
			var (
				set         = r.modifies()
				inner, _, _ = innerNodes(entries[first].k, entries[last].k, first == 0, last == len(entries)-1, nodes, t)
			)
			for path, hash := range inner {
				if _, ok := set[path]; !ok {
					t.Fatalf("Missing nodes %v", []byte(path))
				}
				if hash != set[path] {
					t.Fatalf("Inconsistent node, want %x, got: %x", hash, set[path])
				}
			}
			// No extra writes beyond the expected inner nodes either.
			if r.updates() != len(inner) {
				t.Fatalf("Unexpected node write detected, want: %d, got: %d", len(inner), r.updates())
			}
		}
		// Randomized boundary pairs.
		for j := 0; j < 100; j++ {
			var (
				first int
				last  int
			)
			for {
				first = rand.Intn(len(entries))
				last = rand.Intn(len(entries))
				if first <= last {
					break
				}
			}
			check(first, last)
		}
		// Deterministic edge cases.
		var cases = []struct {
			first int
			last  int
		}{
			{0, len(entries) - 1},                // full
			{1, len(entries) - 1},                // no left
			{2, len(entries) - 1},                // no left
			{2, len(entries) - 2},                // no left and right
			{2, len(entries) - 2},                // no left and right (duplicate kept as-is)
			{len(entries) / 2, len(entries) / 2}, // single
			{0, 0},                               // single first
			{len(entries) - 1, len(entries) - 1}, // single last
		}
		for _, c := range cases {
			check(c.first, c.last)
		}
	}
}
||||||
|
|
||||||
|
// TestGentreeDanglingClearing tests if the dangling nodes falling within the
// path space of constructed tree can be correctly removed.
func TestGentreeDanglingClearing(t *testing.T) {
	for round := 0; round < 100; round++ {
		// Generate a random, sorted set of key/value entries.
		var (
			n       = rand.Intn(1024) + 10
			entries []*kv
		)
		for i := 0; i < n; i++ {
			var val []byte
			if rand.Intn(3) == 0 {
				val = testrand.Bytes(3)
			} else {
				val = testrand.Bytes(32)
			}
			entries = append(entries, &kv{
				k: testrand.Bytes(32),
				v: val,
			})
		}
		slices.SortFunc(entries, (*kv).cmp)

		// Build the reference full trie, recording every committed node path.
		nodes := make(map[string]common.Hash)
		tr := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
			nodes[string(path)] = hash
		})
		for i := 0; i < len(entries); i++ {
			tr.Update(entries[i].k, entries[i].v)
		}
		tr.Hash()

		check := func(first, last int) {
			var (
				db    = rawdb.NewMemoryDatabase()
				batch = db.NewBatch()
			)
			// Write the junk nodes as the dangling: every strict prefix of a
			// real node path that is NOT itself a real node is a candidate
			// dangling position.
			var injects []string
			for path := range nodes {
				for i := 0; i < len(path); i++ {
					_, ok := nodes[path[:i]]
					if ok {
						continue
					}
					injects = append(injects, path[:i])
				}
			}
			if len(injects) == 0 {
				return
			}
			for _, path := range injects {
				rawdb.WriteAccountTrieNode(db, []byte(path), testrand.Bytes(32))
			}

			// Build the partial tree with specific range
			replay := buildPartial(common.Hash{}, db, batch, entries, first, last)
			if replay.unknowns > 0 {
				t.Fatalf("Unknown database write: %d", replay.unknowns)
			}
			set := replay.modifies()

			// Make sure the injected junks falling within the path space of
			// committed trie nodes are correctly deleted.
			_, leftRoot, rightRoot := innerNodes(entries[first].k, entries[last].k, first == 0, last == len(entries)-1, nodes, t)
			for _, path := range injects {
				// Junk strictly left of the committed span (and not an
				// ancestor of the leftmost root) is out of scope.
				if bytes.Compare([]byte(path), leftRoot) < 0 && !bytes.HasPrefix(leftRoot, []byte(path)) {
					continue
				}
				// Junk strictly right of the committed span is out of scope.
				if bytes.Compare([]byte(path), rightRoot) > 0 {
					continue
				}
				if hash, ok := set[path]; !ok || hash != (common.Hash{}) {
					t.Fatalf("Missing delete, %v", []byte(path))
				}
			}
		}
		// Randomized boundary pairs.
		for j := 0; j < 100; j++ {
			var (
				first int
				last  int
			)
			for {
				first = rand.Intn(len(entries))
				last = rand.Intn(len(entries))
				if first <= last {
					break
				}
			}
			check(first, last)
		}
		// Deterministic edge cases.
		var cases = []struct {
			first int
			last  int
		}{
			{0, len(entries) - 1},                // full
			{1, len(entries) - 1},                // no left
			{2, len(entries) - 1},                // no left
			{2, len(entries) - 2},                // no left and right
			{2, len(entries) - 2},                // no left and right (duplicate kept as-is)
			{len(entries) / 2, len(entries) / 2}, // single
			{0, 0},                               // single first
			{len(entries) - 1, len(entries) - 1}, // single last
		}
		for _, c := range cases {
			check(c.first, c.last)
		}
	}
}
||||||
|
|
||||||
|
// TestFlushPartialTree tests the gentrie can produce complete inner trie nodes
// even with lots of batch flushes.
func TestFlushPartialTree(t *testing.T) {
	// Generate a random, sorted set of key/value entries.
	var entries []*kv
	for i := 0; i < 1024; i++ {
		var val []byte
		if rand.Intn(3) == 0 {
			val = testrand.Bytes(3)
		} else {
			val = testrand.Bytes(32)
		}
		entries = append(entries, &kv{
			k: testrand.Bytes(32),
			v: val,
		})
	}
	slices.SortFunc(entries, (*kv).cmp)

	// Build the reference full trie, recording every committed node.
	nodes := make(map[string]common.Hash)
	tr := trie.NewStackTrie(func(path []byte, hash common.Hash, blob []byte) {
		nodes[string(path)] = hash
	})
	for i := 0; i < len(entries); i++ {
		tr.Update(entries[i].k, entries[i].v)
	}
	tr.Hash()

	var cases = []struct {
		first int
		last  int
	}{
		{0, len(entries) - 1},                // full
		{1, len(entries) - 1},                // no left
		{10, len(entries) - 1},               // no left
		{10, len(entries) - 2},               // no left and right
		{10, len(entries) - 10},              // no left and right
		{11, 11},                             // single
		{0, 0},                               // single first
		{len(entries) - 1, len(entries) - 1}, // single last
	}
	for _, c := range cases {
		var (
			db       = rawdb.NewMemoryDatabase()
			batch    = db.NewBatch()
			combined = db.NewBatch()
		)
		inner, _, _ := innerNodes(entries[c.first].k, entries[c.last].k, c.first == 0, c.last == len(entries)-1, nodes, t)

		tr := newPathTrie(common.Hash{}, c.first != 0, db, batch)
		for i := c.first; i <= c.last; i++ {
			tr.update(entries[i].k, entries[i].v)
			// Randomly force an intermediate incomplete commit + batch flush
			// to simulate memory-pressure flushes mid-sync. All mutations are
			// mirrored into 'combined' for end-of-run inspection.
			if rand.Intn(2) == 0 {
				tr.commit(false)

				batch.Replay(combined)
				batch.Write()
				batch.Reset()
			}
		}
		tr.commit(c.last == len(entries)-1)

		batch.Replay(combined)
		batch.Write()
		batch.Reset()

		r := newBatchReplay()
		combined.Replay(r)

		// Ensure all the internal nodes are produced
		set := r.modifies()
		for path, hash := range inner {
			if _, ok := set[path]; !ok {
				t.Fatalf("Missing nodes %v", []byte(path))
			}
			if hash != set[path] {
				t.Fatalf("Inconsistent node, want %x, got: %x", hash, set[path])
			}
		}
		if r.updates() != len(inner) {
			t.Fatalf("Unexpected node write detected, want: %d, got: %d", len(inner), r.updates())
		}
	}
}
||||||
|
|
||||||
|
// TestBoundSplit ensures two consecutive trie chunks are not overlapped with
// each other.
func TestBoundSplit(t *testing.T) {
	// Generate a random, sorted set of key/value entries.
	var entries []*kv
	for i := 0; i < 1024; i++ {
		var val []byte
		if rand.Intn(3) == 0 {
			val = testrand.Bytes(3)
		} else {
			val = testrand.Bytes(32)
		}
		entries = append(entries, &kv{
			k: testrand.Bytes(32),
			v: val,
		})
	}
	slices.SortFunc(entries, (*kv).cmp)

	for j := 0; j < 100; j++ {
		var (
			next int
			last int
			db   = rawdb.NewMemoryDatabase()

			lastRightRoot []byte
		)
		// Walk the entry list in random-sized consecutive chunks.
		for {
			if next == len(entries) {
				break
			}
			last = rand.Intn(len(entries)-next) + next

			r := buildPartial(common.Hash{}, db, db.NewBatch(), entries, next, last)
			set := r.modifies()

			// Skip if the chunk is zero-size
			if r.updates() == 0 {
				next = last + 1
				continue
			}

			// Ensure the updates in two consecutive chunks are not overlapped.
			// The only overlapping part should be deletion.
			if lastRightRoot != nil && len(set) > 0 {
				// Derive the path of left-most node in this chunk
				var leftRoot []byte
				for path, hash := range r.modifies() {
					if hash == (common.Hash{}) {
						t.Fatalf("Unexpected deletion %v", []byte(path))
					}
					if leftRoot == nil || bytes.Compare(leftRoot, []byte(path)) > 0 {
						leftRoot = []byte(path)
					}
				}
				// Neither root may be an ancestor of the other, otherwise the
				// chunks share part of the path space.
				if bytes.HasPrefix(lastRightRoot, leftRoot) || bytes.HasPrefix(leftRoot, lastRightRoot) {
					t.Fatalf("Two chunks are not correctly separated, lastRight: %v, left: %v", lastRightRoot, leftRoot)
				}
			}

			// Track the updates as the last chunk
			var rightRoot []byte
			for path := range set {
				if rightRoot == nil ||
					(bytes.Compare(rightRoot, []byte(path)) < 0) ||
					(bytes.Compare(rightRoot, []byte(path)) > 0 && bytes.HasPrefix(rightRoot, []byte(path))) {
					rightRoot = []byte(path)
				}
			}
			lastRightRoot = rightRoot
			next = last + 1
		}
	}
}
||||||
|
|
||||||
|
// TestTinyPartialTree tests if the partial tree is too tiny(has less than two
// states), then nothing should be committed.
func TestTinyPartialTree(t *testing.T) {
	// Generate a random, sorted set of key/value entries.
	var entries []*kv
	for i := 0; i < 1024; i++ {
		var val []byte
		if rand.Intn(3) == 0 {
			val = testrand.Bytes(3)
		} else {
			val = testrand.Bytes(32)
		}
		entries = append(entries, &kv{
			k: testrand.Bytes(32),
			v: val,
		})
	}
	slices.SortFunc(entries, (*kv).cmp)

	// Slide a two-entry (or one-entry, at the tail) window across the set.
	for i := 0; i < len(entries); i++ {
		next := i
		last := i + 1
		if last >= len(entries) {
			last = len(entries) - 1
		}
		db := rawdb.NewMemoryDatabase()
		r := buildPartial(common.Hash{}, db, db.NewBatch(), entries, next, last)

		// An interior tiny chunk (incomplete on both sides) must commit
		// no node writes at all.
		if next != 0 && last != len(entries)-1 {
			if r.updates() != 0 {
				t.Fatalf("Unexpected data writes, got: %d", r.updates())
			}
		}
	}
}
Loading…
Reference in new issue